hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f713e7fc29e485533a2b6dde959137a75ce66d30 | 7,898 | py | Python | gammapy/estimators/tests/test_flux_point.py | LauraOlivera/gammapy | 0643bb772c86092f758efad745f248a517658013 | [
"BSD-3-Clause"
] | 155 | 2015-02-25T12:38:02.000Z | 2022-03-13T17:54:30.000Z | gammapy/estimators/tests/test_flux_point.py | LauraOlivera/gammapy | 0643bb772c86092f758efad745f248a517658013 | [
"BSD-3-Clause"
] | 3,131 | 2015-01-06T15:36:23.000Z | 2022-03-31T17:30:57.000Z | gammapy/estimators/tests/test_flux_point.py | registerrier/gammapy | 8aadf0ec524bcf51d0ac5655a04507d5d449e7ed | [
"BSD-3-Clause"
] | 158 | 2015-03-16T20:36:44.000Z | 2022-03-30T16:05:37.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.table import Table
from gammapy.catalog.fermi import SourceCatalog3FGL
from gammapy.estimators import FluxPoints
from gammapy.modeling.models import SpectralModel
from gammapy.utils.scripts import make_path
from gammapy.utils.testing import (
assert_quantity_allclose,
mpl_plot_check,
requires_data,
requires_dependency,
)
FLUX_POINTS_FILES = [
"diff_flux_points.ecsv",
"diff_flux_points.fits",
"flux_points.ecsv",
"flux_points.fits",
]
class LWTestModel(SpectralModel):
    """Analytic test model g(x) = 1e4 * exp(-6 x) from Lafferty & Wyatt."""

    @staticmethod
    def evaluate(x):
        # Point value of the model at x.
        exponent = -6 * x
        return 1e4 * np.exp(exponent)

    def integral(self, xmin, xmax, **kwargs):
        # Closed-form integral of evaluate() over [xmin, xmax].
        upper_term = np.exp(-6 * xmax)
        lower_term = np.exp(-6 * xmin)
        return -1.0 / 6 * 1e4 * (upper_term - lower_term)

    def inverse(self, y):
        # Solve evaluate(x) == y for x.
        scaled = y * 1e-4
        return -1.0 / 6 * np.log(scaled)
class XSqrTestModel(SpectralModel):
    """Analytic test model f(x) = x**2 with closed-form integral and inverse."""

    @staticmethod
    def evaluate(x):
        return x ** 2

    def integral(self, xmin, xmax, **kwargs):
        # Antiderivative of x**2 is x**3 / 3, so the definite integral is
        # (xmax**3 - xmin**3) / 3.
        # Fixed: the lower bound was previously raised to the 2nd power
        # (xmin ** 2), which is mathematically inconsistent with evaluate().
        return 1.0 / 3 * (xmax ** 3 - xmin ** 3)

    def inverse(self, y):
        # Solve evaluate(x) == y for x (non-negative branch).
        return np.sqrt(y)
class ExpTestModel(SpectralModel):
    """Unit-aware analytic test model f(E) = exp(E / 1 TeV)."""

    @staticmethod
    def evaluate(x):
        per_tev = u.Unit("1 / TeV")
        return np.exp(x * per_tev)

    def integral(self, xmin, xmax, **kwargs):
        # Difference of the antiderivative evaluated at the two bounds.
        per_tev = u.Unit("1 / TeV")
        return np.exp(xmax * per_tev) - np.exp(xmin * per_tev)

    def inverse(self, y):
        # Solve evaluate(x) == y for x, restoring the TeV unit.
        return np.log(y * u.TeV) * u.TeV
def test_energy_ref_lafferty():
    """
    Check FluxPoints._energy_ref_lafferty against the reference x-points for
    g(x) = 10^4 exp(-6x) tabulated in Lafferty & Wyatt, Nucl. Instr. and
    Meth. in Phys. Res. A 355 (1995) 541-547, p. 542 Table 1.
    """
    model = LWTestModel()
    bin_edges_lo = np.array([0.0, 0.1, 0.3, 0.6])
    bin_edges_hi = np.array([0.1, 0.3, 0.6, 1.0])
    # Reference values taken from the paper.
    expected = np.array([0.048, 0.190, 0.428, 0.762])
    computed = FluxPoints._energy_ref_lafferty(model, bin_edges_lo, bin_edges_hi)
    assert_allclose(computed, expected, atol=1e-3)
@pytest.mark.xfail
def test_dnde_from_flux():
    """Test the y-value normalization adjustment (integral flux -> dnde).

    Marked xfail: presumably the current FluxPoints API no longer matches
    this check -- confirm before un-marking.
    """
    table = Table()
    table["e_min"] = np.array([10, 20, 30, 40])
    table["e_max"] = np.array([20, 30, 40, 50])
    table["flux"] = np.array([42, 52, 62, 72])  # 'True' integral flux in this test bin
    # Build flux points using the Lafferty-Wyatt reference energies.
    model = XSqrTestModel()
    table["e_ref"] = FluxPoints._energy_ref_lafferty(model, table["e_min"], table["e_max"])
    dnde = FluxPoints.from_table(table, reference_model=model)
    # Model dnde evaluated at the reference energies.
    dnde_model = model(table["e_ref"])
    # Expected: the model's analytic integral flux per bin.
    desired = model.integral(table["e_min"], table["e_max"])
    # Actual: rescale the tabulated flux by the model/computed dnde ratio.
    actual = table["flux"] * (dnde_model / dnde)
    # The two should agree if the normalization adjustment is correct.
    assert_allclose(actual, desired, rtol=1e-6)
@pytest.mark.xfail
@pytest.mark.parametrize("method", ["table", "lafferty", "log_center"])
def test_compute_flux_points_dnde_exp(method):
    """
    Test dnde computation against the analytical result for three choices of
    reference energy: explicit table values, Lafferty-Wyatt, and log-center.

    Marked xfail: presumably out of sync with the current FluxPoints API --
    confirm before un-marking.
    """
    model = ExpTestModel()
    energy_min = [1.0, 10.0] * u.TeV
    energy_max = [10.0, 100.0] * u.TeV
    table = Table()
    table.meta["SED_TYPE"] = "flux"
    table["e_min"] = energy_min
    table["e_max"] = energy_max
    # Integral flux per bin from the analytic model.
    flux = model.integral(energy_min, energy_max)
    table["flux"] = flux
    # Choose the reference energy per the parametrized method.
    if method == "log_center":
        energy_ref = np.sqrt(energy_min * energy_max)
    elif method == "table":
        energy_ref = [2.0, 20.0] * u.TeV
    elif method == "lafferty":
        energy_ref = FluxPoints._energy_ref_lafferty(model, energy_min, energy_max)
    table["e_ref"] = energy_ref
    result = FluxPoints.from_table(table, reference_model=model)
    # Reference energies must round-trip through FluxPoints.
    actual = result.energy_ref
    assert_quantity_allclose(actual, energy_ref, rtol=1e-8)
    # dnde must match the model evaluated at the reference energies.
    actual = result.dnde
    desired = model(energy_ref)
    assert_quantity_allclose(actual, desired, rtol=1e-8)
@requires_data()
def test_fermi_to_dnde():
    """Spot-check the dnde conversion for one 4FGL catalog source."""
    from gammapy.catalog import SourceCatalog4FGL

    catalog = SourceCatalog4FGL("$GAMMAPY_DATA/catalogs/fermi/gll_psc_v20.fit.gz")
    source = catalog["FGES J1553.8-5325"]
    flux_points = source.flux_points
    # Reference value for the second energy bin at the source position.
    expected = 4.567393e-10 * u.Unit("cm-2 s-1 MeV-1")
    assert_allclose(flux_points.dnde.quantity[1, 0, 0], expected, rtol=1e-5)
@pytest.fixture(params=FLUX_POINTS_FILES, scope="session")
def flux_points(request):
    """Session-scoped FluxPoints fixture, parametrized over the sample files."""
    base_dir = "$GAMMAPY_DATA/tests/spectrum/flux_points/"
    return FluxPoints.read(base_dir + request.param)
@pytest.fixture(scope="session")
def flux_points_likelihood():
    """Session-scoped FluxPoints fixture with likelihood (stat-scan) data."""
    likelihood_file = "$GAMMAPY_DATA/tests/spectrum/flux_points/binlike.fits"
    return FluxPoints.read(likelihood_file)
@requires_data()
class TestFluxPoints:
    """Round-trip and accessor tests for FluxPoints on the sample files."""

    def test_info(self, flux_points):
        # __str__ must mention the main structural sections.
        info = str(flux_points)
        assert "geom" in info
        assert "axes" in info
        assert "ref. model" in info
        assert "quantities" in info
    def test_energy_ref(self, flux_points):
        # Reference energies are the log-center of each bin.
        actual = flux_points.energy_ref
        desired = np.sqrt(flux_points.energy_min * flux_points.energy_max)
        assert_quantity_allclose(actual, desired)
    def test_energy_min(self, flux_points):
        # Checksum-style comparison against a precomputed sum for the sample data.
        actual = flux_points.energy_min
        desired = 299530.97 * u.MeV
        assert_quantity_allclose(actual.sum(), desired)
    def test_energy_max(self, flux_points):
        # Checksum-style comparison against a precomputed sum for the sample data.
        actual = flux_points.energy_max
        desired = 399430.975 * u.MeV
        assert_quantity_allclose(actual.sum(), desired)
    def test_write_fits(self, tmp_path, flux_points):
        # FITS round-trip must preserve the string representation.
        flux_points.write(tmp_path / "tmp.fits", sed_type=flux_points.sed_type_init)
        actual = FluxPoints.read(tmp_path / "tmp.fits")
        assert str(flux_points) == str(actual)
    def test_write_ecsv(self, tmp_path, flux_points):
        # ECSV round-trip must preserve the string representation.
        flux_points.write(tmp_path / "flux_points.ecsv", sed_type=flux_points.sed_type_init)
        actual = FluxPoints.read(tmp_path / "flux_points.ecsv")
        assert str(flux_points) == str(actual)
    def test_quantity_access(self, flux_points_likelihood):
        # Likelihood-type files expose ts/stat quantities and default n_sigma_ul.
        assert flux_points_likelihood.sqrt_ts
        assert flux_points_likelihood.ts
        assert flux_points_likelihood.stat
        assert_allclose(flux_points_likelihood.n_sigma_ul, 2)
        assert flux_points_likelihood.sed_type_init == "likelihood"
    @requires_dependency("matplotlib")
    def test_plot(self, flux_points):
        with mpl_plot_check():
            flux_points.plot()
    @requires_dependency("matplotlib")
    def test_plot_likelihood(self, flux_points_likelihood):
        with mpl_plot_check():
            flux_points_likelihood.plot_ts_profiles()
    @requires_dependency("matplotlib")
    def test_plot_likelihood_error(self, flux_points_likelihood):
        # Without the stat scan the profile plot must fail loudly.
        # NOTE: mutates the session-scoped fixture; later tests in the session
        # will see the deleted "stat_scan" entry.
        del flux_points_likelihood._data["stat_scan"]
        with pytest.raises(AttributeError):
            flux_points_likelihood.plot_ts_profiles()
@requires_data()
def test_compute_flux_points_dnde_fermi():
    """
    Test compute_flux_points_dnde on a Fermi 3FGL source (Vela pulsar region).

    Compares the catalog table columns against the FluxPoints-derived values.
    """
    fermi_3fgl = SourceCatalog3FGL()
    source = fermi_3fgl["3FGL J0835.3-4510"]
    flux_points = source.flux_points
    table = source.flux_points_table
    for column in ["e2dnde", "e2dnde_errn", "e2dnde_errp", "e2dnde_ul"]:
        actual = table[column].quantity
        desired = getattr(flux_points, column).quantity.squeeze()
        # The last bin is excluded from the comparison -- presumably it is an
        # upper limit / unreliable in the catalog; confirm before changing.
        assert_quantity_allclose(actual[:-1], desired[:-1], rtol=0.05)
@requires_data()
@requires_dependency("matplotlib")
def test_plot_fp_no_ul():
    """Plotting must still work when the upper-limit column is absent."""
    fits_path = make_path("$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits")
    fp_table = Table.read(fits_path)
    # Drop the upper limits to exercise the no-UL code path.
    fp_table.remove_column('dnde_ul')
    flux_points = FluxPoints.from_table(fp_table, sed_type='dnde')
    with mpl_plot_check():
        flux_points.plot()
| 30.851563 | 92 | 0.67688 |
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.table import Table
from gammapy.catalog.fermi import SourceCatalog3FGL
from gammapy.estimators import FluxPoints
from gammapy.modeling.models import SpectralModel
from gammapy.utils.scripts import make_path
from gammapy.utils.testing import (
assert_quantity_allclose,
mpl_plot_check,
requires_data,
requires_dependency,
)
FLUX_POINTS_FILES = [
"diff_flux_points.ecsv",
"diff_flux_points.fits",
"flux_points.ecsv",
"flux_points.fits",
]
class LWTestModel(SpectralModel):
@staticmethod
def evaluate(x):
return 1e4 * np.exp(-6 * x)
def integral(self, xmin, xmax, **kwargs):
return -1.0 / 6 * 1e4 * (np.exp(-6 * xmax) - np.exp(-6 * xmin))
def inverse(self, y):
return -1.0 / 6 * np.log(y * 1e-4)
class XSqrTestModel(SpectralModel):
@staticmethod
def evaluate(x):
return x ** 2
def integral(self, xmin, xmax, **kwargs):
return 1.0 / 3 * (xmax ** 3 - xmin ** 2)
def inverse(self, y):
return np.sqrt(y)
class ExpTestModel(SpectralModel):
@staticmethod
def evaluate(x):
return np.exp(x * u.Unit("1 / TeV"))
def integral(self, xmin, xmax, **kwargs):
return np.exp(xmax * u.Unit("1 / TeV")) - np.exp(xmin * u.Unit("1 / TeV"))
def inverse(self, y):
return np.log(y * u.TeV) * u.TeV
def test_energy_ref_lafferty():
desired = np.array([0.048, 0.190, 0.428, 0.762])
model = LWTestModel()
energy_min = np.array([0.0, 0.1, 0.3, 0.6])
energy_max = np.array([0.1, 0.3, 0.6, 1.0])
actual = FluxPoints._energy_ref_lafferty(model, energy_min, energy_max)
assert_allclose(actual, desired, atol=1e-3)
@pytest.mark.xfail
def test_dnde_from_flux():
table = Table()
table["e_min"] = np.array([10, 20, 30, 40])
table["e_max"] = np.array([20, 30, 40, 50])
table["flux"] = np.array([42, 52, 62, 72])
model = XSqrTestModel()
table["e_ref"] = FluxPoints._energy_ref_lafferty(model, table["e_min"], table["e_max"])
dnde = FluxPoints.from_table(table, reference_model=model)
dnde_model = model(table["e_ref"])
desired = model.integral(table["e_min"], table["e_max"])
actual = table["flux"] * (dnde_model / dnde)
assert_allclose(actual, desired, rtol=1e-6)
@pytest.mark.xfail
@pytest.mark.parametrize("method", ["table", "lafferty", "log_center"])
def test_compute_flux_points_dnde_exp(method):
model = ExpTestModel()
energy_min = [1.0, 10.0] * u.TeV
energy_max = [10.0, 100.0] * u.TeV
table = Table()
table.meta["SED_TYPE"] = "flux"
table["e_min"] = energy_min
table["e_max"] = energy_max
flux = model.integral(energy_min, energy_max)
table["flux"] = flux
if method == "log_center":
energy_ref = np.sqrt(energy_min * energy_max)
elif method == "table":
energy_ref = [2.0, 20.0] * u.TeV
elif method == "lafferty":
energy_ref = FluxPoints._energy_ref_lafferty(model, energy_min, energy_max)
table["e_ref"] = energy_ref
result = FluxPoints.from_table(table, reference_model=model)
actual = result.energy_ref
assert_quantity_allclose(actual, energy_ref, rtol=1e-8)
actual = result.dnde
desired = model(energy_ref)
assert_quantity_allclose(actual, desired, rtol=1e-8)
@requires_data()
def test_fermi_to_dnde():
from gammapy.catalog import SourceCatalog4FGL
catalog_4fgl = SourceCatalog4FGL("$GAMMAPY_DATA/catalogs/fermi/gll_psc_v20.fit.gz")
src = catalog_4fgl["FGES J1553.8-5325"]
fp = src.flux_points
assert_allclose(
fp.dnde.quantity[1, 0, 0],
4.567393e-10 * u.Unit("cm-2 s-1 MeV-1"),
rtol=1e-5,
)
@pytest.fixture(params=FLUX_POINTS_FILES, scope="session")
def flux_points(request):
path = "$GAMMAPY_DATA/tests/spectrum/flux_points/" + request.param
return FluxPoints.read(path)
@pytest.fixture(scope="session")
def flux_points_likelihood():
path = "$GAMMAPY_DATA/tests/spectrum/flux_points/binlike.fits"
return FluxPoints.read(path)
@requires_data()
class TestFluxPoints:
def test_info(self, flux_points):
info = str(flux_points)
assert "geom" in info
assert "axes" in info
assert "ref. model" in info
assert "quantities" in info
def test_energy_ref(self, flux_points):
actual = flux_points.energy_ref
desired = np.sqrt(flux_points.energy_min * flux_points.energy_max)
assert_quantity_allclose(actual, desired)
def test_energy_min(self, flux_points):
actual = flux_points.energy_min
desired = 299530.97 * u.MeV
assert_quantity_allclose(actual.sum(), desired)
def test_energy_max(self, flux_points):
actual = flux_points.energy_max
desired = 399430.975 * u.MeV
assert_quantity_allclose(actual.sum(), desired)
def test_write_fits(self, tmp_path, flux_points):
flux_points.write(tmp_path / "tmp.fits", sed_type=flux_points.sed_type_init)
actual = FluxPoints.read(tmp_path / "tmp.fits")
assert str(flux_points) == str(actual)
def test_write_ecsv(self, tmp_path, flux_points):
flux_points.write(tmp_path / "flux_points.ecsv", sed_type=flux_points.sed_type_init)
actual = FluxPoints.read(tmp_path / "flux_points.ecsv")
assert str(flux_points) == str(actual)
def test_quantity_access(self, flux_points_likelihood):
assert flux_points_likelihood.sqrt_ts
assert flux_points_likelihood.ts
assert flux_points_likelihood.stat
assert_allclose(flux_points_likelihood.n_sigma_ul, 2)
assert flux_points_likelihood.sed_type_init == "likelihood"
@requires_dependency("matplotlib")
def test_plot(self, flux_points):
with mpl_plot_check():
flux_points.plot()
@requires_dependency("matplotlib")
def test_plot_likelihood(self, flux_points_likelihood):
with mpl_plot_check():
flux_points_likelihood.plot_ts_profiles()
@requires_dependency("matplotlib")
def test_plot_likelihood_error(self, flux_points_likelihood):
del flux_points_likelihood._data["stat_scan"]
with pytest.raises(AttributeError):
flux_points_likelihood.plot_ts_profiles()
@requires_data()
def test_compute_flux_points_dnde_fermi():
fermi_3fgl = SourceCatalog3FGL()
source = fermi_3fgl["3FGL J0835.3-4510"]
flux_points = source.flux_points
table = source.flux_points_table
for column in ["e2dnde", "e2dnde_errn", "e2dnde_errp", "e2dnde_ul"]:
actual = table[column].quantity
desired = getattr(flux_points, column).quantity.squeeze()
assert_quantity_allclose(actual[:-1], desired[:-1], rtol=0.05)
@requires_data()
@requires_dependency("matplotlib")
def test_plot_fp_no_ul():
path = make_path("$GAMMAPY_DATA/tests/spectrum/flux_points/diff_flux_points.fits")
table = Table.read(path)
table.remove_column('dnde_ul')
fp = FluxPoints.from_table(table, sed_type='dnde')
with mpl_plot_check():
fp.plot()
| true | true |
f713e8c33d889784d56225ab47aa4e7c69c1753d | 1,369 | py | Python | backend/config.py | takusan64/world-dictionary-backend | 98d92547eef6c5bb4adf89dd412273d30db00317 | [
"MIT"
] | null | null | null | backend/config.py | takusan64/world-dictionary-backend | 98d92547eef6c5bb4adf89dd412273d30db00317 | [
"MIT"
] | null | null | null | backend/config.py | takusan64/world-dictionary-backend | 98d92547eef6c5bb4adf89dd412273d30db00317 | [
"MIT"
] | null | null | null | import os
from os.path import join, dirname
from dotenv import load_dotenv
from urllib.parse import urlparse
# loading .env file
env_path = join(dirname(__file__), '.env')
load_dotenv(env_path)
# use function
def url_path_check(path):
    """Return *path* if it is a plain URL path component, else None.

    The check builds ``http://localhost<path>`` and verifies that urlparse
    round-trips the path unchanged, which rejects strings carrying a query,
    fragment, or missing leading slash. Note the empty string round-trips
    and is returned as-is (falsy, so ``or None`` callers normalize it).
    """
    sample_host = 'http://localhost'
    sample_url = sample_host + path
    # urlparse() always returns a (truthy) ParseResult, so the former
    # `urlparse(...) and` guard was dead code; only the comparison matters.
    if urlparse(sample_url).path == path:
        return path
    return None
def number_check(num=None):
    """Return *num* coerced to int, or None when it is missing or not numeric.

    Previously ``int(num)`` ran unguarded, so an unset environment variable
    (``num is None``) raised TypeError and aborted configuration loading
    instead of letting callers fall back to their ``or`` defaults; the old
    ``isinstance(int(num), int)`` test was also always True.
    """
    try:
        return int(num)
    except (TypeError, ValueError):
        return None
# Register Env Param: read configuration from the environment (populated from
# .env above), applying defaults where a variable is unset or invalid.
try:
    # Auth flag: enabled only when the variable is (case-insensitively) the
    # string "true". Fixed: `in ('true')` was a *substring* test against the
    # string 'true' (missing tuple comma), so values like "t" or "ru" would
    # incorrectly enable the feature; `or False` was redundant.
    API_AUTH_FEATURE = os.environ.get('API_AUTH_FEATURE', 'False').lower() == 'true'
    DEFAULT_LANGUAGE = os.environ.get('DEFAULT_LANGUAGE') or 'ja'
    VERSION = os.environ.get('VERSION') or '1.0.0'
    # Optional documentation endpoints: empty or invalid paths become None.
    SHOW_SWAGGER_PATH = url_path_check(os.environ.get('SHOW_SWAGGER_PATH') or "") or None
    SHOW_REDOC_PATH = url_path_check(os.environ.get('SHOW_REDOC_PATH') or "") or None
    # Fixed: this line lacked the `or ""` guard its siblings have, so an
    # unset SHOW_OPENAPI_PATH passed None into url_path_check and raised
    # TypeError (crashing startup) instead of defaulting to None.
    SHOW_OPENAPI_PATH = url_path_check(os.environ.get('SHOW_OPENAPI_PATH') or "") or None
    # Database connection settings with development defaults.
    DB_HOST = os.environ.get('DB_HOST') or 'pgsql'
    DB_PORT = number_check(os.environ.get('DB_PORT')) or 5432
    DB_USER = os.environ.get('DB_USER') or 'postgres'
    DB_PASSWORD = os.environ.get('DB_PASSWORD') or 'postgres'
    DATABASE = os.environ.get('DATABASE') or 'postgres'
except Exception:
    print("defined param error: check .env file")
    raise
from os.path import join, dirname
from dotenv import load_dotenv
from urllib.parse import urlparse
env_path = join(dirname(__file__), '.env')
load_dotenv(env_path)
def url_path_check(path):
sample_host = 'http://localhost'
sample_url = sample_host + path
if urlparse(sample_url) and urlparse(sample_url).path == path:
return path
return None
def number_check(num=None):
if isinstance(int(num), int):
return int(num)
return None
try:
API_AUTH_FEATURE = os.environ.get('API_AUTH_FEATURE', 'False').lower() in ('true') or False
DEFAULT_LANGUAGE = os.environ.get('DEFAULT_LANGUAGE') or 'ja'
VERSION = os.environ.get('VERSION') or '1.0.0'
SHOW_SWAGGER_PATH = url_path_check(os.environ.get('SHOW_SWAGGER_PATH') or "") or None
SHOW_REDOC_PATH = url_path_check(os.environ.get('SHOW_REDOC_PATH') or "") or None
SHOW_OPENAPI_PATH = url_path_check(os.environ.get('SHOW_OPENAPI_PATH')) or None
DB_HOST = os.environ.get('DB_HOST') or 'pgsql'
DB_PORT = number_check(os.environ.get('DB_PORT')) or 5432
DB_USER = os.environ.get('DB_USER') or 'postgres'
DB_PASSWORD = os.environ.get('DB_PASSWORD') or 'postgres'
DATABASE = os.environ.get('DATABASE') or 'postgres'
except Exception:
print("defined param error: check .env file")
raise | true | true |
f713ea8db528032ad33727686296727e18b4ed76 | 4,232 | py | Python | pnc/draco3_lb_pnc/draco3_lb_controller.py | MaxxWilson/ASE389Project | 13c3c72887e27fbed2eef63c1e27b4a185036a39 | [
"MIT"
] | 17 | 2021-05-31T10:55:48.000Z | 2022-03-30T10:09:37.000Z | pnc/draco3_lb_pnc/draco3_lb_controller.py | MaxxWilson/ASE389Project | 13c3c72887e27fbed2eef63c1e27b4a185036a39 | [
"MIT"
] | 2 | 2021-10-01T22:11:43.000Z | 2021-12-06T02:34:33.000Z | pnc/draco3_lb_pnc/draco3_lb_controller.py | MaxxWilson/ASE389Project | 13c3c72887e27fbed2eef63c1e27b4a185036a39 | [
"MIT"
] | 3 | 2021-08-24T00:53:18.000Z | 2022-03-31T17:29:07.000Z | import numpy as np
from util import util
from config.draco3_lb_config import PnCConfig, WBCConfig
from pnc.wbc.ihwbc.ihwbc import IHWBC
from pnc.wbc.ihwbc.joint_integrator import JointIntegrator
class Draco3LBController(object):
    """Whole-body controller for the Draco3 lower-body robot.

    Builds an implicit-hierarchy WBC (IHWBC) with actuation selection
    matrices, solves it each tick from the task/contact/internal-constraint
    container, and double-integrates the resulting accelerations into
    position/velocity commands.
    """
    def __init__(self, tci_container, robot):
        # tci_container: holds task_list, contact_list, internal_constraint_list.
        # robot: robot-model interface providing dynamics and joint state.
        self._tci_container = tci_container
        self._robot = robot
        # Initialize WBC.
        # The distal knee joints (jd) are passive (coupled via internal
        # constraints to the proximal jp joints), so they are excluded from
        # the actuated set below.
        l_jp_idx, l_jd_idx, r_jp_idx, r_jd_idx = self._robot.get_q_dot_idx(
            ['l_knee_fe_jp', 'l_knee_fe_jd', 'r_knee_fe_jp', 'r_knee_fe_jd'])
        act_list = [False] * robot.n_floating + [True] * robot.n_a
        act_list[l_jd_idx] = False
        act_list[r_jd_idx] = False
        n_q_dot = len(act_list)
        n_active = np.count_nonzero(np.array(act_list))
        n_passive = n_q_dot - n_active - 6
        # Selection matrices: _sa picks actuated joints, _sv passive joints,
        # _sf the 6 floating-base DoFs (assumes the first 6 generalized
        # velocities are the floating base).
        self._sa = np.zeros((n_active, n_q_dot))
        self._sv = np.zeros((n_passive, n_q_dot))
        j, k = 0, 0
        for i in range(n_q_dot):
            if i >= 6:
                if act_list[i]:
                    self._sa[j, i] = 1.
                    j += 1
                else:
                    self._sv[k, i] = 1.
                    k += 1
        self._sf = np.zeros((6, n_q_dot))
        self._sf[0:6, 0:6] = np.eye(6)
        self._ihwbc = IHWBC(self._sf, self._sa, self._sv, PnCConfig.SAVE_DATA)
        if WBCConfig.B_TRQ_LIMIT:
            # Map full-joint torque limits into actuated-joint space.
            self._ihwbc.trq_limit = np.dot(self._sa[:, 6:],
                                           self._robot.joint_trq_limit)
        self._ihwbc.lambda_q_ddot = WBCConfig.LAMBDA_Q_DDOT
        self._ihwbc.lambda_rf = WBCConfig.LAMBDA_RF
        # Initialize Joint Integrator (turns commanded accelerations into
        # velocity/position setpoints with filtering and limits).
        self._joint_integrator = JointIntegrator(robot.n_a,
                                                 PnCConfig.CONTROLLER_DT)
        self._joint_integrator.pos_cutoff_freq = WBCConfig.POS_CUTOFF_FREQ
        self._joint_integrator.vel_cutoff_freq = WBCConfig.VEL_CUTOFF_FREQ
        self._joint_integrator.max_pos_err = WBCConfig.MAX_POS_ERR
        self._joint_integrator.joint_pos_limit = self._robot.joint_pos_limit
        self._joint_integrator.joint_vel_limit = self._robot.joint_vel_limit
        self._b_first_visit = True
    def get_command(self):
        """Solve the WBC for this tick and return the joint command dict."""
        if self._b_first_visit:
            self.first_visit()
        # Dynamics properties from the current robot state.
        mass_matrix = self._robot.get_mass_matrix()
        mass_matrix_inv = np.linalg.inv(mass_matrix)
        coriolis = self._robot.get_coriolis()
        gravity = self._robot.get_gravity()
        self._ihwbc.update_setting(mass_matrix, mass_matrix_inv, coriolis,
                                   gravity)
        # Task, Contact, and Internal Constraint Setup: refresh Jacobians,
        # commands, and collect per-task hierarchy weights.
        w_hierarchy_list = []
        for task in self._tci_container.task_list:
            task.update_jacobian()
            task.update_cmd()
            w_hierarchy_list.append(task.w_hierarchy)
        self._ihwbc.w_hierarchy = np.array(w_hierarchy_list)
        for contact in self._tci_container.contact_list:
            contact.update_contact()
        for internal_constraint in self._tci_container.internal_constraint_list:
            internal_constraint.update_internal_constraint()
        # WBC commands (rf_cmd = reaction forces; unused here).
        joint_trq_cmd, joint_acc_cmd, rf_cmd = self._ihwbc.solve(
            self._tci_container.task_list, self._tci_container.contact_list,
            self._tci_container.internal_constraint_list)
        # Map actuated-space results back to full joint space.
        joint_trq_cmd = np.dot(self._sa[:, 6:].transpose(), joint_trq_cmd)
        joint_acc_cmd = np.dot(self._sa[:, 6:].transpose(), joint_acc_cmd)
        # Double integration: acceleration -> velocity -> position setpoints.
        joint_vel_cmd, joint_pos_cmd = self._joint_integrator.integrate(
            joint_acc_cmd, self._robot.joint_velocities,
            self._robot.joint_positions)
        command = self._robot.create_cmd_ordered_dict(joint_pos_cmd,
                                                      joint_vel_cmd,
                                                      joint_trq_cmd)
        return command
    def first_visit(self):
        """One-time setup: seed the integrator from the measured joint state."""
        joint_pos_ini = self._robot.joint_positions
        self._joint_integrator.initialize_states(np.zeros(self._robot.n_a),
                                                 joint_pos_ini)
        self._b_first_visit = False
| 41.90099 | 80 | 0.617675 | import numpy as np
from util import util
from config.draco3_lb_config import PnCConfig, WBCConfig
from pnc.wbc.ihwbc.ihwbc import IHWBC
from pnc.wbc.ihwbc.joint_integrator import JointIntegrator
class Draco3LBController(object):
def __init__(self, tci_container, robot):
self._tci_container = tci_container
self._robot = robot
l_jp_idx, l_jd_idx, r_jp_idx, r_jd_idx = self._robot.get_q_dot_idx(
['l_knee_fe_jp', 'l_knee_fe_jd', 'r_knee_fe_jp', 'r_knee_fe_jd'])
act_list = [False] * robot.n_floating + [True] * robot.n_a
act_list[l_jd_idx] = False
act_list[r_jd_idx] = False
n_q_dot = len(act_list)
n_active = np.count_nonzero(np.array(act_list))
n_passive = n_q_dot - n_active - 6
self._sa = np.zeros((n_active, n_q_dot))
self._sv = np.zeros((n_passive, n_q_dot))
j, k = 0, 0
for i in range(n_q_dot):
if i >= 6:
if act_list[i]:
self._sa[j, i] = 1.
j += 1
else:
self._sv[k, i] = 1.
k += 1
self._sf = np.zeros((6, n_q_dot))
self._sf[0:6, 0:6] = np.eye(6)
self._ihwbc = IHWBC(self._sf, self._sa, self._sv, PnCConfig.SAVE_DATA)
if WBCConfig.B_TRQ_LIMIT:
self._ihwbc.trq_limit = np.dot(self._sa[:, 6:],
self._robot.joint_trq_limit)
self._ihwbc.lambda_q_ddot = WBCConfig.LAMBDA_Q_DDOT
self._ihwbc.lambda_rf = WBCConfig.LAMBDA_RF
self._joint_integrator = JointIntegrator(robot.n_a,
PnCConfig.CONTROLLER_DT)
self._joint_integrator.pos_cutoff_freq = WBCConfig.POS_CUTOFF_FREQ
self._joint_integrator.vel_cutoff_freq = WBCConfig.VEL_CUTOFF_FREQ
self._joint_integrator.max_pos_err = WBCConfig.MAX_POS_ERR
self._joint_integrator.joint_pos_limit = self._robot.joint_pos_limit
self._joint_integrator.joint_vel_limit = self._robot.joint_vel_limit
self._b_first_visit = True
def get_command(self):
if self._b_first_visit:
self.first_visit()
mass_matrix = self._robot.get_mass_matrix()
mass_matrix_inv = np.linalg.inv(mass_matrix)
coriolis = self._robot.get_coriolis()
gravity = self._robot.get_gravity()
self._ihwbc.update_setting(mass_matrix, mass_matrix_inv, coriolis,
gravity)
w_hierarchy_list = []
for task in self._tci_container.task_list:
task.update_jacobian()
task.update_cmd()
w_hierarchy_list.append(task.w_hierarchy)
self._ihwbc.w_hierarchy = np.array(w_hierarchy_list)
for contact in self._tci_container.contact_list:
contact.update_contact()
for internal_constraint in self._tci_container.internal_constraint_list:
internal_constraint.update_internal_constraint()
joint_trq_cmd, joint_acc_cmd, rf_cmd = self._ihwbc.solve(
self._tci_container.task_list, self._tci_container.contact_list,
self._tci_container.internal_constraint_list)
joint_trq_cmd = np.dot(self._sa[:, 6:].transpose(), joint_trq_cmd)
joint_acc_cmd = np.dot(self._sa[:, 6:].transpose(), joint_acc_cmd)
joint_vel_cmd, joint_pos_cmd = self._joint_integrator.integrate(
joint_acc_cmd, self._robot.joint_velocities,
self._robot.joint_positions)
command = self._robot.create_cmd_ordered_dict(joint_pos_cmd,
joint_vel_cmd,
joint_trq_cmd)
return command
def first_visit(self):
joint_pos_ini = self._robot.joint_positions
self._joint_integrator.initialize_states(np.zeros(self._robot.n_a),
joint_pos_ini)
self._b_first_visit = False
| true | true |
f713ee8c639c7e6ee5d24c6125c399a3192d0f4d | 1,629 | py | Python | pymantic/parsers/base.py | machallboyd/pymantic | 159208f1a45d4bfda56adaa0cfdb555cadd89d39 | [
"BSD-3-Clause"
] | null | null | null | pymantic/parsers/base.py | machallboyd/pymantic | 159208f1a45d4bfda56adaa0cfdb555cadd89d39 | [
"BSD-3-Clause"
] | 1 | 2019-06-19T13:17:57.000Z | 2019-06-19T13:44:10.000Z | pymantic/parsers/base.py | machallboyd/pymantic | 159208f1a45d4bfda56adaa0cfdb555cadd89d39 | [
"BSD-3-Clause"
] | 1 | 2020-09-22T15:38:33.000Z | 2020-09-22T15:38:33.000Z | from collections import defaultdict
from threading import local
import pymantic.primitives
class BaseParser(object):
    """Common base class for all parsers.

    Wraps an RDF environment and thread-local per-parse state, exposing
    small factory helpers for literals, nodes, triples and quads.
    """
    def __init__(self, environment=None):
        self.env = environment or pymantic.primitives.RDFEnvironment()
        self.profile = self.env.createProfile()
        self._call_state = local()
    def make_datatype_literal(self, value, datatype):
        """Create a typed literal."""
        return self.env.createLiteral(value=value, datatype=datatype)
    def make_language_literal(self, value, lang=None):
        """Create a plain literal, language-tagged when *lang* is given."""
        if not lang:
            return self.env.createLiteral(value=value)
        return self.env.createLiteral(value=value, language=lang)
    def make_named_node(self, iri):
        """Create a named node (IRI)."""
        return self.env.createNamedNode(iri)
    def make_blank_node(self, label=None):
        """Return the bnode memoized for *label* this parse, or a fresh one."""
        if not label:
            return self.env.createBlankNode()
        return self._call_state.bnodes[label]
    def make_triple(self, subject, predicate, object):
        """Create a triple."""
        return self.env.createTriple(subject, predicate, object)
    def make_quad(self, subject, predicate, object, graph):
        """Create a quad."""
        return self.env.createQuad(subject, predicate, object, graph)
    def _prepare_parse(self, graph):
        # Reset per-parse thread-local state: a fresh bnode cache and target graph.
        self._call_state.bnodes = defaultdict(self.env.createBlankNode)
        self._call_state.graph = graph
    def _cleanup_parse(self):
        # Drop per-parse state so nothing leaks between parses on this thread.
        del self._call_state.bnodes
        del self._call_state.graph
    def _make_graph(self):
        return self.env.createGraph()
| 30.735849 | 74 | 0.688152 | from collections import defaultdict
from threading import local
import pymantic.primitives
class BaseParser(object):
def __init__(self, environment=None):
self.env = environment or pymantic.primitives.RDFEnvironment()
self.profile = self.env.createProfile()
self._call_state = local()
def make_datatype_literal(self, value, datatype):
return self.env.createLiteral(value=value, datatype=datatype)
def make_language_literal(self, value, lang=None):
if lang:
return self.env.createLiteral(value=value, language=lang)
else:
return self.env.createLiteral(value=value)
def make_named_node(self, iri):
return self.env.createNamedNode(iri)
def make_blank_node(self, label=None):
if label:
return self._call_state.bnodes[label]
else:
return self.env.createBlankNode()
def make_triple(self, subject, predicate, object):
return self.env.createTriple(subject, predicate, object)
def make_quad(self, subject, predicate, object, graph):
return self.env.createQuad(subject, predicate, object, graph)
def _prepare_parse(self, graph):
self._call_state.bnodes = defaultdict(self.env.createBlankNode)
self._call_state.graph = graph
def _cleanup_parse(self):
del self._call_state.bnodes
del self._call_state.graph
def _make_graph(self):
return self.env.createGraph()
| true | true |
f713eea928557ab0c23074afaa3be56464a6df33 | 3,006 | py | Python | huobi/impl/accountinfomap.py | icemilk00/huobi_Python | 0cccd98ed926faa21e0bfc34033e29755788a36e | [
"Apache-2.0"
] | null | null | null | huobi/impl/accountinfomap.py | icemilk00/huobi_Python | 0cccd98ed926faa21e0bfc34033e29755788a36e | [
"Apache-2.0"
] | null | null | null | huobi/impl/accountinfomap.py | icemilk00/huobi_Python | 0cccd98ed926faa21e0bfc34033e29755788a36e | [
"Apache-2.0"
] | null | null | null | from huobi.exception.huobiapiexception import HuobiApiException
from huobi.impl.restapiinvoker import call_sync
from huobi.model.user import User
class AccountInfoMap:
    """Per-API-key cache of Huobi accounts with id<->type lookup tables.

    Note: the three maps are class-level attributes, so all instances share
    the same cache.
    """
    user_map = dict()
    account_id_type_map = dict()
    account_type_id_map = dict()

    def update_user_info(self, api_key, request_impl):
        """Fetch the accounts for *api_key* and rebuild the cached lookups."""
        accounts = call_sync(request_impl.get_accounts())
        user = User()
        user.accounts = accounts
        self.user_map[api_key] = user
        if accounts and len(accounts):
            id_to_type = {}
            type_to_id = {}
            for account in accounts:
                id_to_type[account.id] = account.account_type
                type_to_id[account.account_type] = account.id
            self.account_id_type_map[api_key] = id_to_type
            self.account_type_id_map[api_key] = type_to_id

    def get_user(self, api_key):
        """Return the cached User for *api_key*; raise when missing/unknown."""
        if api_key is None or api_key == "":
            raise HuobiApiException(HuobiApiException.KEY_MISSING, "[User] Key is empty or null")
        if api_key not in self.user_map:
            raise HuobiApiException(HuobiApiException.RUNTIME_ERROR, "[User] Cannot found user by key: " + api_key)
        return self.user_map[api_key]

    def get_account_by_id(self, api_key, account_id):
        """Return the account with *account_id*; raise when it cannot be found."""
        account = self.get_user(api_key).get_account_by_id(account_id)
        if account is None:
            raise HuobiApiException(HuobiApiException.RUNTIME_ERROR,
                                    "[User] Cannot find the account, key: " +
                                    api_key + ", account id: " + str(account_id))
        return account

    def get_all_accounts(self, api_key):
        """Return every cached account for *api_key* (raises when unknown)."""
        return self.get_user(api_key).accounts

    def get_account_type_by_id(self, api_key, account_id):
        """Look up an account's type by its id; None when the id is unknown."""
        if api_key is None or api_key == "":
            raise HuobiApiException(HuobiApiException.KEY_MISSING, "[User] Key is empty or null")
        if api_key not in self.account_id_type_map:
            raise HuobiApiException(HuobiApiException.RUNTIME_ERROR, "[User] Cannot found account_id by key: " + api_key)
        return self.account_id_type_map[api_key].get(account_id)

    def get_account_id_by_type(self, api_key, account_type):
        """Look up an account's id by its type; None when the type is unknown."""
        if api_key is None or api_key == "":
            raise HuobiApiException(HuobiApiException.KEY_MISSING, "[User] Key is empty or null")
        if api_key not in self.account_type_id_map:
            raise HuobiApiException(HuobiApiException.RUNTIME_ERROR, "[User] Cannot found account_type by key: " + api_key)
        return self.account_type_id_map[api_key].get(account_type)

    def get_all_accounts_without_check(self, api_key):
        """Return the cached accounts for *api_key*, or None when not cached."""
        if api_key is None or api_key == "":
            raise HuobiApiException(HuobiApiException.KEY_MISSING, "[User] Key is empty or null")
        user = self.user_map.get(api_key)
        if user is None:
            return None
        return user.accounts
account_info_map = AccountInfoMap()
| 45.545455 | 123 | 0.674983 | from huobi.exception.huobiapiexception import HuobiApiException
from huobi.impl.restapiinvoker import call_sync
from huobi.model.user import User
class AccountInfoMap:
    """In-memory registry of Huobi users and their accounts, keyed by API key.

    All three maps are class-level attributes, so every instance (including the
    module-level singleton) shares the same cached state.
    """

    # api_key -> User (carries the account list fetched from the REST API)
    user_map = dict()
    # api_key -> {account id -> account type}
    account_id_type_map = dict()
    # api_key -> {account type -> account id}
    account_type_id_map = dict()

    @staticmethod
    def _require_api_key(api_key):
        # Shared guard: every lookup rejects a missing/empty key up front.
        if api_key is None or api_key == "":
            raise HuobiApiException(HuobiApiException.KEY_MISSING, "[User] Key is empty or null")

    def update_user_info(self, api_key, request_impl):
        """Fetch the account list for ``api_key`` and rebuild its lookup maps."""
        fetched_accounts = call_sync(request_impl.get_accounts())
        cached_user = User()
        cached_user.accounts = fetched_accounts
        self.user_map[api_key] = cached_user
        if fetched_accounts and len(fetched_accounts):
            id_to_type = {}
            type_to_id = {}
            # Install fresh maps first, then fill them in fetch order.
            self.account_id_type_map[api_key] = id_to_type
            self.account_type_id_map[api_key] = type_to_id
            for account in fetched_accounts:
                id_to_type[account.id] = account.account_type
                type_to_id[account.account_type] = account.id

    def get_user(self, api_key):
        """Return the cached ``User`` for ``api_key``; raise if unknown."""
        self._require_api_key(api_key)
        if api_key not in self.user_map:
            raise HuobiApiException(HuobiApiException.RUNTIME_ERROR, "[User] Cannot found user by key: " + api_key)
        return self.user_map[api_key]

    def get_account_by_id(self, api_key, account_id):
        """Return the account with ``account_id`` owned by the user of ``api_key``."""
        account = self.get_user(api_key).get_account_by_id(account_id)
        if account is not None:
            return account
        raise HuobiApiException(HuobiApiException.RUNTIME_ERROR,
                                "[User] Cannot find the account, key: " +
                                api_key + ", account id: " + str(account_id))

    def get_all_accounts(self, api_key):
        """Return every cached account for ``api_key`` (raises for unknown keys)."""
        return self.get_user(api_key).accounts

    def get_account_type_by_id(self, api_key, account_id):
        """Map an account id to its type; ``None`` if the id is not cached."""
        self._require_api_key(api_key)
        if api_key not in self.account_id_type_map:
            raise HuobiApiException(HuobiApiException.RUNTIME_ERROR, "[User] Cannot found account_id by key: " + api_key)
        return self.account_id_type_map[api_key].get(account_id)

    def get_account_id_by_type(self, api_key, account_type):
        """Map an account type to its id; ``None`` if the type is not cached."""
        self._require_api_key(api_key)
        if api_key not in self.account_type_id_map:
            raise HuobiApiException(HuobiApiException.RUNTIME_ERROR, "[User] Cannot found account_type by key: " + api_key)
        return self.account_type_id_map[api_key].get(account_type)

    def get_all_accounts_without_check(self, api_key):
        """Like :meth:`get_all_accounts` but returns ``None`` for unknown keys."""
        self._require_api_key(api_key)
        cached_user = self.user_map.get(api_key)
        return cached_user.accounts if cached_user is not None else None
account_info_map = AccountInfoMap()
| true | true |
f713efa3bf3641e0b950751ae3760062ec38d96a | 6,138 | py | Python | visual_change_analysis/main.py | Barry-lab/Publication_TanniDeCothiBarry2021 | 425bc0bd9a74b837d912820e9ea1539a111fcb1f | [
"Unlicense"
] | null | null | null | visual_change_analysis/main.py | Barry-lab/Publication_TanniDeCothiBarry2021 | 425bc0bd9a74b837d912820e9ea1539a111fcb1f | [
"Unlicense"
] | null | null | null | visual_change_analysis/main.py | Barry-lab/Publication_TanniDeCothiBarry2021 | 425bc0bd9a74b837d912820e9ea1539a111fcb1f | [
"Unlicense"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import pearsonr
from bin_data import bin_data
# import pixel data
# Per-camera pixel-change time series, z-scored offline; assumed to be one
# value per trajectory step, aligned row-for-row with the dataframe loaded
# below — TODO confirm alignment with the pickle.
right_z_pixel_change = np.load("right_z_pixel_change.npy")
left_z_pixel_change = np.load("left_z_pixel_change.npy")
front_z_pixel_change = np.load("front_z_pixel_change.npy")
# Average pixel change across the front, left & right fields of view.
pixel_change = np.vstack((left_z_pixel_change, front_z_pixel_change, right_z_pixel_change)).mean(axis=0)
# Population-vector (firing-rate) change data, one row per trajectory sample.
dat = pd.read_pickle("df_population_vector_change.p")
# Clean the data: keep environment 'D', drop rows with missing tracking
# fields, and keep only transitions where sequential samples are ~1cm apart
# along the trajectory (squared step length < 1.01).
dat = dat[dat.environment == 'D']
df = dat.filter(['animal', 'x_coord', 'y_coord', 'direction', 'timestamp'], axis=1)
dat = dat[~df.isnull().any(axis=1)]
good_pixel_ids = np.array(np.diff(dat.x_coord)**2 + np.diff(dat.y_coord)**2 < 1.01, dtype=bool)
pixel_change = pixel_change[good_pixel_ids]
# np.diff shortens the mask by one, so prepend False to index the rate rows.
good_rate_ids = np.append(False, good_pixel_ids)
turning_rate = np.abs(np.diff(dat['direction'])) % 360
turning_rate = turning_rate[good_pixel_ids]
dat = dat[good_rate_ids]
# z-score both change signals so the maps share one colour scale.
dat['rate change\n(euclidean)'] = (dat['rate change\n(euclidean)'] - np.mean(dat['rate change\n(euclidean)']))/np.std(dat['rate change\n(euclidean)'])
pixel_change = (pixel_change - np.mean(pixel_change))/np.std(pixel_change)
# Occupancy map: sample counts in 4-unit spatial bins over a 350x250 arena.
occupancy = bin_data([dat.x_coord, dat.y_coord], bin_size = 4, limits = [(0, 350), (0, 250)])
plt.imshow(occupancy.T, origin='upper', cmap=plt.get_cmap('jet'))
plt.title('Occupancy')
plt.show()
# Pixel change across space (binned sum divided by occupancy = per-bin mean).
pixel_change_map = bin_data([dat.x_coord, dat.y_coord], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change) / occupancy
plt.imshow(pixel_change_map.T, origin='upper', cmap=plt.get_cmap('jet'))
plt.axis('off')
plt.clim([-1.5,1.5])
plt.title('Pixel Change Map')
plt.show()
# Firing-rate change across space, same binning and normalisation.
rate_change_map = bin_data([dat.x_coord, dat.y_coord], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)']) / occupancy
plt.imshow(rate_change_map.T, origin='upper', cmap=plt.get_cmap('jet'))
plt.axis('off')
plt.clim([-1.5,1.5])
plt.title('Rate Change Map')
plt.show()
corr, _ = pearsonr(pixel_change, dat['rate change\n(euclidean)'])
print('Rate Change vs Pixel Change Pearson r = %.3f' % corr)
# Filter bits of trajectory by head direction (dat.direction appears to be in
# radians: converted with np.degrees before the modulo — TODO confirm units).
north_ids = (np.degrees(dat.direction) % 360 >= 315) | (np.degrees(dat.direction) % 360 < 45)
north_occupancy = bin_data([dat.x_coord[north_ids], dat.y_coord[north_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])
south_ids = (np.degrees(dat.direction) % 360 >= 135) & (np.degrees(dat.direction) % 360 < 225)
south_occupancy = bin_data([dat.x_coord[south_ids], dat.y_coord[south_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])
east_ids = (np.degrees(dat.direction) % 360 >= 45) & (np.degrees(dat.direction) % 360 < 135)
east_occupancy = bin_data([dat.x_coord[east_ids], dat.y_coord[east_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])
west_ids = (np.degrees(dat.direction) % 360 >= 225) & (np.degrees(dat.direction) % 360 < 315)
west_occupancy = bin_data([dat.x_coord[west_ids], dat.y_coord[west_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])
cmap = plt.get_cmap('jet')
cmap.set_bad('w',1.)  # paint unvisited (NaN after the occupancy divide) bins white
# Per-heading pixel and rate change maps (same occupancy normalisation,
# restricted to samples with the given heading).
north_pix_map = bin_data([dat.x_coord[north_ids], dat.y_coord[north_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[north_ids]) / north_occupancy
south_pix_map = bin_data([dat.x_coord[south_ids], dat.y_coord[south_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[south_ids]) / south_occupancy
east_pix_map = bin_data([dat.x_coord[east_ids], dat.y_coord[east_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[east_ids]) / east_occupancy
west_pix_map = bin_data([dat.x_coord[west_ids], dat.y_coord[west_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[west_ids]) / west_occupancy
north_rat_map = bin_data([dat.x_coord[north_ids], dat.y_coord[north_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)'][north_ids]) / north_occupancy
south_rat_map = bin_data([dat.x_coord[south_ids], dat.y_coord[south_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)'][south_ids]) / south_occupancy
east_rat_map = bin_data([dat.x_coord[east_ids], dat.y_coord[east_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)'][east_ids]) / east_occupancy
west_rat_map = bin_data([dat.x_coord[west_ids], dat.y_coord[west_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)'][west_ids]) / west_occupancy
# Shared colour limits for all unfolded panels.
c_lo = -1.5
c_hi = 1.5
# "Unfolded" change maps: centre panel (subplot 5) is the all-headings map,
# surrounding panels (2/4/6/8) show the per-heading maps as plotted below.
plt.subplot(3,3,2)
plt.title('Unfolded Pixel Change Map')
plt.imshow(west_pix_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,4)
plt.imshow(south_pix_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,5)
plt.imshow(pixel_change_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,6)
plt.imshow(north_pix_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,8)
plt.imshow(east_pix_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.show()
# Same unfolded layout for the firing-rate change maps.
plt.subplot(3,3,2)
plt.title('Unfolded Rate Change Map')
plt.imshow(west_rat_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,4)
plt.imshow(south_rat_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,5)
plt.imshow(rate_change_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,6)
plt.imshow(north_rat_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,8)
plt.imshow(east_rat_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.show() | 47.581395 | 194 | 0.717009 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import pearsonr
from bin_data import bin_data
right_z_pixel_change = np.load("right_z_pixel_change.npy")
left_z_pixel_change = np.load("left_z_pixel_change.npy")
front_z_pixel_change = np.load("front_z_pixel_change.npy")
pixel_change = np.vstack((left_z_pixel_change, front_z_pixel_change, right_z_pixel_change)).mean(axis=0)
dat = pd.read_pickle("df_population_vector_change.p")
dat = dat[dat.environment == 'D']
df = dat.filter(['animal', 'x_coord', 'y_coord', 'direction', 'timestamp'], axis=1)
dat = dat[~df.isnull().any(axis=1)]
good_pixel_ids = np.array(np.diff(dat.x_coord)**2 + np.diff(dat.y_coord)**2 < 1.01, dtype=bool)
pixel_change = pixel_change[good_pixel_ids]
good_rate_ids = np.append(False, good_pixel_ids)
turning_rate = np.abs(np.diff(dat['direction'])) % 360
turning_rate = turning_rate[good_pixel_ids]
dat = dat[good_rate_ids]
dat['rate change\n(euclidean)'] = (dat['rate change\n(euclidean)'] - np.mean(dat['rate change\n(euclidean)']))/np.std(dat['rate change\n(euclidean)'])
pixel_change = (pixel_change - np.mean(pixel_change))/np.std(pixel_change)
occupancy = bin_data([dat.x_coord, dat.y_coord], bin_size = 4, limits = [(0, 350), (0, 250)])
plt.imshow(occupancy.T, origin='upper', cmap=plt.get_cmap('jet'))
plt.title('Occupancy')
plt.show()
pixel_change_map = bin_data([dat.x_coord, dat.y_coord], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change) / occupancy
plt.imshow(pixel_change_map.T, origin='upper', cmap=plt.get_cmap('jet'))
plt.axis('off')
plt.clim([-1.5,1.5])
plt.title('Pixel Change Map')
plt.show()
rate_change_map = bin_data([dat.x_coord, dat.y_coord], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)']) / occupancy
plt.imshow(rate_change_map.T, origin='upper', cmap=plt.get_cmap('jet'))
plt.axis('off')
plt.clim([-1.5,1.5])
plt.title('Rate Change Map')
plt.show()
corr, _ = pearsonr(pixel_change, dat['rate change\n(euclidean)'])
print('Rate Change vs Pixel Change Pearson r = %.3f' % corr)
north_ids = (np.degrees(dat.direction) % 360 >= 315) | (np.degrees(dat.direction) % 360 < 45)
north_occupancy = bin_data([dat.x_coord[north_ids], dat.y_coord[north_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])
south_ids = (np.degrees(dat.direction) % 360 >= 135) & (np.degrees(dat.direction) % 360 < 225)
south_occupancy = bin_data([dat.x_coord[south_ids], dat.y_coord[south_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])
east_ids = (np.degrees(dat.direction) % 360 >= 45) & (np.degrees(dat.direction) % 360 < 135)
east_occupancy = bin_data([dat.x_coord[east_ids], dat.y_coord[east_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])
west_ids = (np.degrees(dat.direction) % 360 >= 225) & (np.degrees(dat.direction) % 360 < 315)
west_occupancy = bin_data([dat.x_coord[west_ids], dat.y_coord[west_ids]], bin_size = 4, limits = [(0, 350), (0, 250)])
cmap = plt.get_cmap('jet')
cmap.set_bad('w',1.)
north_pix_map = bin_data([dat.x_coord[north_ids], dat.y_coord[north_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[north_ids]) / north_occupancy
south_pix_map = bin_data([dat.x_coord[south_ids], dat.y_coord[south_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[south_ids]) / south_occupancy
east_pix_map = bin_data([dat.x_coord[east_ids], dat.y_coord[east_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[east_ids]) / east_occupancy
west_pix_map = bin_data([dat.x_coord[west_ids], dat.y_coord[west_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = pixel_change[west_ids]) / west_occupancy
north_rat_map = bin_data([dat.x_coord[north_ids], dat.y_coord[north_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)'][north_ids]) / north_occupancy
south_rat_map = bin_data([dat.x_coord[south_ids], dat.y_coord[south_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)'][south_ids]) / south_occupancy
east_rat_map = bin_data([dat.x_coord[east_ids], dat.y_coord[east_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)'][east_ids]) / east_occupancy
west_rat_map = bin_data([dat.x_coord[west_ids], dat.y_coord[west_ids]], bin_size = 4, limits = [(0, 350), (0, 250)], var_to_bin = dat['rate change\n(euclidean)'][west_ids]) / west_occupancy
c_lo = -1.5
c_hi = 1.5
plt.subplot(3,3,2)
plt.title('Unfolded Pixel Change Map')
plt.imshow(west_pix_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,4)
plt.imshow(south_pix_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,5)
plt.imshow(pixel_change_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,6)
plt.imshow(north_pix_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,8)
plt.imshow(east_pix_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.show()
plt.subplot(3,3,2)
plt.title('Unfolded Rate Change Map')
plt.imshow(west_rat_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,4)
plt.imshow(south_rat_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,5)
plt.imshow(rate_change_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,6)
plt.imshow(north_rat_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.subplot(3,3,8)
plt.imshow(east_rat_map.T, origin='upper', cmap=cmap)
plt.clim([c_lo,c_hi])
plt.axis('off')
plt.show() | true | true |
f713f0ef135be2dc5f92333e9d7d370a70df85aa | 2,956 | py | Python | mbrl/examples/main.py | alik-git/mbrl-lib | b364f8e64ca71ebd18147fe8cdbd3068b74e1f1e | [
"MIT"
] | null | null | null | mbrl/examples/main.py | alik-git/mbrl-lib | b364f8e64ca71ebd18147fe8cdbd3068b74e1f1e | [
"MIT"
] | 2 | 2022-03-09T19:33:18.000Z | 2022-03-09T19:44:22.000Z | mbrl/examples/main.py | alik-git/mbrl-lib | b364f8e64ca71ebd18147fe8cdbd3068b74e1f1e | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable
import hydra
import numpy as np
import omegaconf
import torch
import mbrl.algorithms.mbpo as mbpo
import mbrl.algorithms.pets as pets
import mbrl.algorithms.planet as planet
import mbrl.algorithms.dreamer as dreamer #added April 2022 for project
import mbrl.util.env
import pandas as pd
from collections import Iterable
import wandb
def flatten_config(cfg, curr_nested_key):
    """Flatten Hydra's nested config into a single-level dict for wandb.

    Nested keys are joined to their parent keys with underscores so the result
    can be written straight into ``wandb.config``.

    Args:
        cfg: The (possibly nested) Hydra/omegaconf config.
        curr_nested_key (str): Prefix accumulated from enclosing keys
            ("" at the top level).

    Returns:
        dict: Flat mapping from underscore-joined key paths to leaf values.
    """
    flattened = {}
    for key in cfg.keys():
        # Entries whose lookup fails (e.g. missing interpolation) become 'NA'.
        try:
            value = cfg[key]
        except Exception:
            value = 'NA'
        prefixed = f"{curr_nested_key}_{key}"
        if type(value) == list or type(value) == omegaconf.listconfig.ListConfig:
            # Lists are expanded element-wise, with the index in the key.
            for idx, element in enumerate(value):
                flattened[f"{prefixed}_{idx}"] = element
        elif isinstance(value, Iterable) and type(value) != str:
            # Sub-configs (any non-string iterable) recurse under the prefix.
            flattened.update(flatten_config(value, prefixed))
        else:
            flattened[prefixed] = value
    return flattened
@hydra.main(config_path="conf", config_name="main")
def run(cfg: omegaconf.DictConfig):
    """Hydra entry point: build the environment and dispatch to the trainer
    named in ``cfg.algorithm.name`` (pets / mbpo / planet / dreamer).

    Returns whatever the selected trainer returns, or ``None`` for an
    unrecognised algorithm name.
    """
    env, term_fn, reward_fn = mbrl.util.env.EnvHandler.make_env(cfg)
    # Mirror both the raw top-level entries and a fully flattened view of the
    # config into wandb so runs can be filtered by any nested field.
    for config_item in cfg:
        wandb.config[config_item] = cfg[config_item]
    flat_cfg = flatten_config(cfg, "")
    for config_item in flat_cfg:
        wandb.config[config_item] = flat_cfg[config_item]
    # Seed numpy and torch for reproducibility.
    np.random.seed(cfg.seed)
    torch.manual_seed(cfg.seed)
    if cfg.algorithm.name == "pets":
        return pets.train(env, term_fn, reward_fn, cfg)
    if cfg.algorithm.name == "mbpo":
        # MBPO evaluates on a separate environment instance.
        test_env, *_ = mbrl.util.env.EnvHandler.make_env(cfg)
        return mbpo.train(env, test_env, term_fn, cfg)
    if cfg.algorithm.name == "planet":
        return planet.train(env, cfg)
    if cfg.algorithm.name == "dreamer":  # added April 2022 for the project
        return dreamer.train(env, cfg)
if __name__ == "__main__":
    # wandb must be initialised before Hydra invokes run(), since run() writes
    # into wandb.config.
    wandb.init(project="MBRL_Duckyt", entity="mbrl_ducky", monitor_gym=True)
    run()
| 33.977011 | 233 | 0.680988 |
from typing import Iterable
import hydra
import numpy as np
import omegaconf
import torch
import mbrl.algorithms.mbpo as mbpo
import mbrl.algorithms.pets as pets
import mbrl.algorithms.planet as planet
import mbrl.algorithms.dreamer as dreamer
import mbrl.util.env
import pandas as pd
from collections import Iterable
import wandb
def flatten_config(cfg, curr_nested_key):
flat_cfg = {}
for curr_key in cfg.keys():
try:
curr_item = cfg[curr_key]
except Exception as e:
curr_item = 'NA'
if type(curr_item) == list or type(curr_item) == omegaconf.listconfig.ListConfig:
for nested_idx, nested_item in enumerate(curr_item):
list_nested_key = f"{curr_nested_key}_{curr_key}_{nested_idx}"
flat_cfg[list_nested_key] = nested_item
elif isinstance(curr_item, Iterable) and type(curr_item) != str:
flat_cfg.update(flatten_config(curr_item, f"{curr_nested_key}_{curr_key}"))
else:
flat_cfg[f"{curr_nested_key}_{curr_key}"] = curr_item
return flat_cfg
@hydra.main(config_path="conf", config_name="main")
def run(cfg: omegaconf.DictConfig):
env, term_fn, reward_fn = mbrl.util.env.EnvHandler.make_env(cfg)
for config_item in cfg:
wandb.config[config_item] = cfg[config_item]
flat_cfg = flatten_config(cfg, "")
for config_item in flat_cfg:
wandb.config[config_item] = flat_cfg[config_item]
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
if cfg.algorithm.name == "pets":
return pets.train(env, term_fn, reward_fn, cfg)
if cfg.algorithm.name == "mbpo":
test_env, *_ = mbrl.util.env.EnvHandler.make_env(cfg)
return mbpo.train(env, test_env, term_fn, cfg)
if cfg.algorithm.name == "planet":
return planet.train(env, cfg)
if cfg.algorithm.name == "dreamer":
return dreamer.train(env, cfg)
if __name__ == "__main__":
wandb.init(project="MBRL_Duckyt", entity="mbrl_ducky", monitor_gym=True)
run()
| true | true |
f713f27d6c357f44964b8142d9ebf6c68ba5f1f4 | 1,631 | py | Python | extern/locating-objects-without-bboxes/object-locator/models/utils.py | YuHsin1998/EllSeg | 91a532650ef809eef081a7ef9af5f1940fb37a37 | [
"MIT"
] | 1 | 2021-05-26T05:45:42.000Z | 2021-05-26T05:45:42.000Z | extern/locating-objects-without-bboxes/object-locator/models/utils.py | xiaohuaibaoguigui/EllSeg | ff56b255f8e650856aec9af23792e105897eba5c | [
"MIT"
] | null | null | null | extern/locating-objects-without-bboxes/object-locator/models/utils.py | xiaohuaibaoguigui/EllSeg | ff56b255f8e650856aec9af23792e105897eba5c | [
"MIT"
] | null | null | null | __copyright__ = \
"""
Copyright ©right © (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation.
All rights reserved.
This software is covered by US patents and copyright.
This source code is to be used for academic research purposes only, and no commercial use is allowed.
For any questions, please contact Edward J. Delp (ace@ecn.purdue.edu) at Purdue University.
Last Modified: 10/02/2019
"""
__license__ = "CC BY-NC-SA 4.0"
__authors__ = "Javier Ribera, David Guera, Yuhao Chen, Edward J. Delp"
__version__ = "1.6.0"
import h5py
import torch
import shutil
def save_net(fname, net):
    """Serialize every tensor in ``net.state_dict()`` to an HDF5 file.

    Each state-dict key becomes one dataset in ``fname``; an existing file is
    overwritten ('w' mode). Tensors are moved to CPU before conversion.
    """
    with h5py.File(fname, 'w') as h5f:
        for k, v in net.state_dict().items():
            h5f.create_dataset(k, data=v.cpu().numpy())
def load_net(fname, net):
    """Load parameters saved by :func:`save_net` into ``net`` in place.

    Args:
        fname: Path to the HDF5 file written by ``save_net``.
        net: Module whose state-dict tensors are overwritten; dataset shapes
            must match the corresponding state-dict entries.
    """
    # Bug fix: ``np`` was referenced below but numpy was never imported in
    # this module (only h5py/torch/shutil), so load_net raised NameError on
    # first use. Import locally to keep the module's import block untouched.
    import numpy as np

    with h5py.File(fname, 'r') as h5f:
        for k, v in net.state_dict().items():
            param = torch.from_numpy(np.asarray(h5f[k]))
            v.copy_(param)
def save_checkpoint(state, is_best, task_id, filename='checkpoint.pth.tar'):
    """Write ``state`` to ``task_id + filename`` with ``torch.save``.

    When ``is_best`` is true, the checkpoint is additionally copied to
    ``task_id + 'model_best.pth.tar'``. ``task_id`` is used as a raw path
    prefix (string concatenation, not os.path.join), so it may encode a
    directory and/or run identifier.
    """
    checkpoint_path = task_id + filename
    torch.save(state, checkpoint_path)
    if is_best:
        shutil.copyfile(checkpoint_path, task_id + 'model_best.pth.tar')
"""
Copyright ©right © (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation.
All rights reserved.
This software is covered by US patents and copyright.
This source code is to be used for academic research purposes only, and no commercial use is allowed.
For any questions, please contact Edward J. Delp (ace@ecn.purdue.edu) at Purdue University.
Last Modified: 10/02/2019
"""
| 33.285714 | 110 | 0.700184 | __copyright__ = \
"""
Copyright ©right © (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation.
All rights reserved.
This software is covered by US patents and copyright.
This source code is to be used for academic research purposes only, and no commercial use is allowed.
For any questions, please contact Edward J. Delp (ace@ecn.purdue.edu) at Purdue University.
Last Modified: 10/02/2019
"""
__license__ = "CC BY-NC-SA 4.0"
__authors__ = "Javier Ribera, David Guera, Yuhao Chen, Edward J. Delp"
__version__ = "1.6.0"
import h5py
import torch
import shutil
def save_net(fname, net):
with h5py.File(fname, 'w') as h5f:
for k, v in net.state_dict().items():
h5f.create_dataset(k, data=v.cpu().numpy())
def load_net(fname, net):
with h5py.File(fname, 'r') as h5f:
for k, v in net.state_dict().items():
param = torch.from_numpy(np.asarray(h5f[k]))
v.copy_(param)
def save_checkpoint(state, is_best,task_id, filename='checkpoint.pth.tar'):
torch.save(state, task_id+filename)
if is_best:
shutil.copyfile(task_id+filename, task_id+'model_best.pth.tar')
| true | true |
f713f39ba58b4fe5662cd70ad5768f8a6af5ad06 | 9,744 | py | Python | tests/torch/nas/test_state.py | openvinotoolkit/nncf_pytorch | 13a483eac6ed891720ba90d7902142c4b3bfa599 | [
"Apache-2.0"
] | 136 | 2020-06-01T14:03:31.000Z | 2020-10-28T06:10:50.000Z | tests/torch/nas/test_state.py | openvinotoolkit/nncf_pytorch | 13a483eac6ed891720ba90d7902142c4b3bfa599 | [
"Apache-2.0"
] | 133 | 2020-05-26T13:48:04.000Z | 2020-10-28T05:25:55.000Z | tests/torch/nas/test_state.py | openvinotoolkit/nncf_pytorch | 13a483eac6ed891720ba90d7902142c4b3bfa599 | [
"Apache-2.0"
] | 36 | 2020-05-28T08:18:39.000Z | 2020-10-27T14:46:58.000Z | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from copy import deepcopy
from functools import partial
import pytest
import torch
from nncf.common.utils.logger import logger as nncf_logger
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.base_handler import SEHBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_depth import EDBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_kernel import EKBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_width import EWBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elasticity_dim import ElasticityDim
from nncf.torch.model_creation import create_nncf_network
from tests.torch.helpers import BasicConvTestModel
from tests.torch.helpers import get_empty_config
from tests.torch.nas.creators import build_elastic_model_from_handler
from tests.torch.nas.descriptors import ElasticityDesc
from tests.torch.nas.helpers import do_conv2d
from tests.torch.nas.helpers import move_model_to_cuda_if_available
from tests.torch.nas.test_elastic_depth import BASIC_ELASTIC_DEPTH_PARAMS
from tests.torch.nas.test_elastic_depth import BasicTestSuperNet
from tests.torch.nas.test_elastic_depth import DepthBasicConvTestModel
from tests.torch.nas.test_elastic_kernel import BASIC_ELASTIC_KERNEL_PARAMS
from tests.torch.nas.test_elastic_width import BASIC_ELASTIC_WIDTH_PARAMS
from tests.torch.nas.test_elastic_width import TwoConvAddConvTestModel
from tests.torch.nas.test_elastic_width import TwoSequentialConvBNTestModel
@pytest.fixture()
def _nncf_caplog(caplog):
    """``caplog`` with propagation enabled on the NNCF logger.

    NNCF's logger does not propagate to the root logger, so pytest's caplog
    would miss its records; enable propagation for the test and restore the
    flag afterwards.

    Note: replaced ``pytest.yield_fixture`` (deprecated since pytest 3.0 and
    removed in pytest 6.2) with ``pytest.fixture``, which supports generator
    fixtures directly with identical semantics.
    """
    nncf_logger.propagate = True
    yield caplog
    nncf_logger.propagate = False
def ref_width_output_fn(model, x):
    # Reference output for width elasticity: the model's minimal subnet,
    # evaluated without any filter reorganization.
    return model.get_minimal_subnet_output_without_reorg(x)
# Width-elasticity descriptors: the expected builder state (elasticity params
# plus the pruning groups discovered in each test model) and the reference
# output function used to validate a restored handler.
COMMON_WIDTH_STATE_DESCS = [
    ElasticityDesc(
        ElasticityDim.WIDTH,
        model_cls=TwoConvAddConvTestModel,
        params=BASIC_ELASTIC_WIDTH_PARAMS,
        ref_state={
            'elasticity_params': BASIC_ELASTIC_WIDTH_PARAMS,
            # conv1/conv2 feed a shared add, so they prune as one group.
            'grouped_node_names_to_prune': [
                ['TwoConvAddConvTestModel/NNCFConv2d[conv1]/conv2d_0',
                 'TwoConvAddConvTestModel/NNCFConv2d[conv2]/conv2d_0']
            ]
        },
        ref_output_fn=ref_width_output_fn
    ),
    ElasticityDesc(
        ElasticityDim.WIDTH,
        model_cls=TwoSequentialConvBNTestModel,
        params=BASIC_ELASTIC_WIDTH_PARAMS,
        ref_state={
            'elasticity_params': BASIC_ELASTIC_WIDTH_PARAMS,
            # Sequential convs prune independently: one group per conv.
            'grouped_node_names_to_prune': [
                ['TwoSequentialConvBNTestModel/Sequential[all_layers]/NNCFConv2d[0]/conv2d_0'],
                ['TwoSequentialConvBNTestModel/Sequential[all_layers]/NNCFConv2d[3]/conv2d_0']
            ]
        },
        ref_output_fn=ref_width_output_fn
    ),
]
def ref_kernel_output_fn(model, x):
    # Reference for kernel elasticity: the central 3x3 slice of the conv's
    # 5x5 kernel, applied with padding 1.
    conv = model.conv
    ref_padding = 1
    ref_weights = conv.weight[:, :, 1:4, 1:4]
    return do_conv2d(conv, x, weight=ref_weights, padding=ref_padding)
# Kernel-elasticity descriptor for a single 5x5 conv: the builder state lists
# which nodes become kernel-elastic, and the reference output checks the
# shrunk-kernel behaviour above.
COMMON_KERNEL_DESC = ElasticityDesc(
    ElasticityDim.KERNEL,
    model_cls=partial(BasicConvTestModel, 1, out_channels=1, kernel_size=5),
    params=BASIC_ELASTIC_KERNEL_PARAMS,
    ref_output_fn=ref_kernel_output_fn,
    ref_state={
        SEHBuilderStateNames.ELASTICITY_PARAMS: BASIC_ELASTIC_KERNEL_PARAMS,
        EKBuilderStateNames.NODE_NAMES_TO_MAKE_ELASTIC: ['BasicConvTestModel/NNCFConv2d[conv]/conv2d_0']
    },
    input_size=[1, 1, 5, 5]
)
# Depth-elasticity descriptor with automatic block search ('auto' mode): the
# expected state records the skip block discovered between conv1 and the
# residual add of BasicTestSuperNet.
COMMON_DEPTH_SUPERNET_DESC = ElasticityDesc(
    ElasticityDim.DEPTH,
    model_cls=BasicTestSuperNet,
    params={
        'mode': 'auto',
        'min_block_size': 2
    },
    ref_state={
        'elasticity_params': {
            'allow_linear_combination': False,
            'allow_nested_blocks': False,
            'max_block_size': 50,
            'min_block_size': 2,
            'skipped_blocks': None
        },
        EDBuilderStateNames.SKIPPED_BLOCKS: [
            {
                'start_node_name': 'BasicTestSuperNet/NNCFConv2d[conv1]/conv2d_0',
                'end_node_name': 'BasicTestSuperNet/__add___0'
            }
        ],
        EDBuilderStateNames.SKIPPED_BLOCKS_DEPENDENCIES: {0: [0]},
        EDBuilderStateNames.OrdinalIds: [[1, 3]],
    },
    ref_search_space=[[0], []]
)
def ref_depth_output_fn(model, x):
    # Reference for depth elasticity: forward pass with 'conv1' skipped.
    model.set_skipped_layers(['conv1'])
    return model(x)
# Depth-elasticity descriptor with manually specified skipped blocks (taken
# from BASIC_ELASTIC_DEPTH_PARAMS rather than discovered automatically).
COMMON_DEPTH_BASIC_DESC = ElasticityDesc(
    ElasticityDim.DEPTH,
    model_cls=DepthBasicConvTestModel,
    params=BASIC_ELASTIC_DEPTH_PARAMS,
    ref_output_fn=ref_depth_output_fn,
    ref_search_space=[[0], []],
    ref_state={
        'elasticity_params': {
            'allow_linear_combination': False,
            'allow_nested_blocks': False,
            'max_block_size': 50,
            'min_block_size': 6,
            'skipped_blocks': [['DepthBasicConvTestModel/Sequential[branch_with_blocks]/NNCFConv2d[conv0]/conv2d_0',
                                'DepthBasicConvTestModel/Sequential[branch_with_blocks]/NNCFConv2d[conv1]/conv2d_0']]
        },
        EDBuilderStateNames.SKIPPED_BLOCKS: BASIC_ELASTIC_DEPTH_PARAMS['skipped_blocks_state'],
        EDBuilderStateNames.SKIPPED_BLOCKS_DEPENDENCIES: BASIC_ELASTIC_DEPTH_PARAMS['skipped_blocks_dependencies'],
        EDBuilderStateNames.OrdinalIds: None,
    }
)
# Descriptors whose builder state is checked after build() has run.
LIST_STATE_AFTER_BUILD_DESCS = [
    *COMMON_WIDTH_STATE_DESCS,
    COMMON_DEPTH_SUPERNET_DESC,
    COMMON_KERNEL_DESC
]
@pytest.mark.parametrize('desc', LIST_STATE_AFTER_BUILD_DESCS, ids=map(str, LIST_STATE_AFTER_BUILD_DESCS))
def test_can_get_builder_state_after_build(desc):
    """The builder's serialized state after build() matches the expected one."""
    _, builder = desc.build_handler()
    actual_state = builder.get_state()
    assert actual_state == desc.ref_state
# Width params variant used for the before-build checks.
ELASTIC_WIDTH_PARAMS_BB = {'filter_importance': 'L2', **BASIC_ELASTIC_WIDTH_PARAMS}
# Descriptors whose builder state is checked before build(): node-name lists
# are still empty at that point, only the params are serialized.
LIST_STATE_BEFORE_BUILD_DESCS = [
    ElasticityDesc(
        ElasticityDim.WIDTH,
        params=ELASTIC_WIDTH_PARAMS_BB,
        ref_state={
            SEHBuilderStateNames.ELASTICITY_PARAMS: ELASTIC_WIDTH_PARAMS_BB,
            EWBuilderStateNames.GROUPED_NODE_NAMES_TO_PRUNE: []
        }
    ),
    ElasticityDesc(
        ElasticityDim.KERNEL,
        params=BASIC_ELASTIC_KERNEL_PARAMS,
        ref_state={
            SEHBuilderStateNames.ELASTICITY_PARAMS: BASIC_ELASTIC_KERNEL_PARAMS,
            EKBuilderStateNames.NODE_NAMES_TO_MAKE_ELASTIC: []
        }
    ),
    COMMON_DEPTH_BASIC_DESC
]
@pytest.mark.parametrize('desc', LIST_STATE_BEFORE_BUILD_DESCS, ids=map(str, LIST_STATE_BEFORE_BUILD_DESCS))
class TestBeforeBuild:
    """State (de)serialization behaviour of elasticity builders before build()."""
    def test_can_get_builder_state_before_build(self, desc: ElasticityDesc):
        # A freshly created builder already serializes the expected state.
        builder = desc.create_builder()
        actual_state = builder.get_state()
        assert actual_state == desc.ref_state
    def test_output_warning_when_state_overrides_params(self, desc: ElasticityDesc, _nncf_caplog):
        # Loading a state whose params differ from the builder's config must
        # emit a WARNING (the loaded state wins over the config).
        old_builder = desc.create_builder_with_config({})
        old_state = old_builder.get_state()
        new_params = desc.params
        new_builder = desc.create_builder_with_config(new_params)
        new_builder.load_state(old_state)
        record = next(iter(_nncf_caplog.records))
        assert record.levelno == logging.WARNING
    def test_no_warning_when_state_and_params_are_the_same(self, desc: ElasticityDesc, _nncf_caplog):
        # Identical params and state must load silently.
        old_builder = desc.create_builder()
        old_state = old_builder.get_state()
        new_params = desc.params.copy()
        new_builder = desc.create_builder_with_config(new_params)
        new_builder.load_state(old_state)
        assert not _nncf_caplog.records
# Descriptors used for the full save/load round-trip check below.
LIST_LOAD_STATE_DESCS = [
    COMMON_DEPTH_BASIC_DESC,
    *COMMON_WIDTH_STATE_DESCS,
    COMMON_KERNEL_DESC
]
@pytest.mark.parametrize('desc', LIST_LOAD_STATE_DESCS, ids=map(str, LIST_LOAD_STATE_DESCS))
def test_can_load_handler_state(desc: ElasticityDesc):
    """Round-trip check: a handler rebuilt from a saved builder state produces
    the same minimal-subnet output as the original handler."""
    model = desc.model_cls()
    move_model_to_cuda_if_available(model)
    # Keep an untouched copy for the second (restored) network.
    model_copy = deepcopy(model)
    device = next(iter(model.parameters())).device
    dummy_input = torch.ones(model.INPUT_SIZE).to(device)
    input_size = desc.input_size
    if not input_size:
        input_size = model.INPUT_SIZE
    config = get_empty_config(input_sample_sizes=input_size)
    # Build the original handler and validate it against the descriptor's
    # reference output on the minimum subnet.
    old_nncf_network = create_nncf_network(model, config)
    old_builder = desc.create_builder()
    old_handler = old_builder.build(old_nncf_network)
    elastic_model = build_elastic_model_from_handler(old_nncf_network, old_handler)
    old_handler.activate_minimum_subnet()
    old_output = elastic_model(dummy_input)
    ref_output = desc.ref_output_fn(model, dummy_input)
    assert torch.allclose(old_output, ref_output)
    # Restore a new builder purely from the serialized state (empty config is
    # enough) and check the rebuilt handler reproduces the same output.
    new_nncf_network = create_nncf_network(model_copy, config)
    builder_state = old_builder.get_state()
    new_builder = desc.create_builder_with_config({})
    new_builder.load_state(builder_state)
    new_handler = new_builder.build(new_nncf_network)
    elastic_model = build_elastic_model_from_handler(new_nncf_network, new_handler)
    new_handler.activate_minimum_subnet()
    new_output = elastic_model(dummy_input)
    assert torch.allclose(old_output, new_output)
| 37.476923 | 117 | 0.74179 | import logging
from copy import deepcopy
from functools import partial
import pytest
import torch
from nncf.common.utils.logger import logger as nncf_logger
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.base_handler import SEHBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_depth import EDBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_kernel import EKBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_width import EWBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elasticity_dim import ElasticityDim
from nncf.torch.model_creation import create_nncf_network
from tests.torch.helpers import BasicConvTestModel
from tests.torch.helpers import get_empty_config
from tests.torch.nas.creators import build_elastic_model_from_handler
from tests.torch.nas.descriptors import ElasticityDesc
from tests.torch.nas.helpers import do_conv2d
from tests.torch.nas.helpers import move_model_to_cuda_if_available
from tests.torch.nas.test_elastic_depth import BASIC_ELASTIC_DEPTH_PARAMS
from tests.torch.nas.test_elastic_depth import BasicTestSuperNet
from tests.torch.nas.test_elastic_depth import DepthBasicConvTestModel
from tests.torch.nas.test_elastic_kernel import BASIC_ELASTIC_KERNEL_PARAMS
from tests.torch.nas.test_elastic_width import BASIC_ELASTIC_WIDTH_PARAMS
from tests.torch.nas.test_elastic_width import TwoConvAddConvTestModel
from tests.torch.nas.test_elastic_width import TwoSequentialConvBNTestModel
@pytest.yield_fixture()
def _nncf_caplog(caplog):
nncf_logger.propagate = True
yield caplog
nncf_logger.propagate = False
def ref_width_output_fn(model, x):
return model.get_minimal_subnet_output_without_reorg(x)
COMMON_WIDTH_STATE_DESCS = [
ElasticityDesc(
ElasticityDim.WIDTH,
model_cls=TwoConvAddConvTestModel,
params=BASIC_ELASTIC_WIDTH_PARAMS,
ref_state={
'elasticity_params': BASIC_ELASTIC_WIDTH_PARAMS,
'grouped_node_names_to_prune': [
['TwoConvAddConvTestModel/NNCFConv2d[conv1]/conv2d_0',
'TwoConvAddConvTestModel/NNCFConv2d[conv2]/conv2d_0']
]
},
ref_output_fn=ref_width_output_fn
),
ElasticityDesc(
ElasticityDim.WIDTH,
model_cls=TwoSequentialConvBNTestModel,
params=BASIC_ELASTIC_WIDTH_PARAMS,
ref_state={
'elasticity_params': BASIC_ELASTIC_WIDTH_PARAMS,
'grouped_node_names_to_prune': [
['TwoSequentialConvBNTestModel/Sequential[all_layers]/NNCFConv2d[0]/conv2d_0'],
['TwoSequentialConvBNTestModel/Sequential[all_layers]/NNCFConv2d[3]/conv2d_0']
]
},
ref_output_fn=ref_width_output_fn
),
]
def ref_kernel_output_fn(model, x):
conv = model.conv
ref_padding = 1
ref_weights = conv.weight[:, :, 1:4, 1:4]
return do_conv2d(conv, x, weight=ref_weights, padding=ref_padding)
COMMON_KERNEL_DESC = ElasticityDesc(
ElasticityDim.KERNEL,
model_cls=partial(BasicConvTestModel, 1, out_channels=1, kernel_size=5),
params=BASIC_ELASTIC_KERNEL_PARAMS,
ref_output_fn=ref_kernel_output_fn,
ref_state={
SEHBuilderStateNames.ELASTICITY_PARAMS: BASIC_ELASTIC_KERNEL_PARAMS,
EKBuilderStateNames.NODE_NAMES_TO_MAKE_ELASTIC: ['BasicConvTestModel/NNCFConv2d[conv]/conv2d_0']
},
input_size=[1, 1, 5, 5]
)
COMMON_DEPTH_SUPERNET_DESC = ElasticityDesc(
ElasticityDim.DEPTH,
model_cls=BasicTestSuperNet,
params={
'mode': 'auto',
'min_block_size': 2
},
ref_state={
'elasticity_params': {
'allow_linear_combination': False,
'allow_nested_blocks': False,
'max_block_size': 50,
'min_block_size': 2,
'skipped_blocks': None
},
EDBuilderStateNames.SKIPPED_BLOCKS: [
{
'start_node_name': 'BasicTestSuperNet/NNCFConv2d[conv1]/conv2d_0',
'end_node_name': 'BasicTestSuperNet/__add___0'
}
],
EDBuilderStateNames.SKIPPED_BLOCKS_DEPENDENCIES: {0: [0]},
EDBuilderStateNames.OrdinalIds: [[1, 3]],
},
ref_search_space=[[0], []]
)
def ref_depth_output_fn(model, x):
model.set_skipped_layers(['conv1'])
return model(x)
COMMON_DEPTH_BASIC_DESC = ElasticityDesc(
ElasticityDim.DEPTH,
model_cls=DepthBasicConvTestModel,
params=BASIC_ELASTIC_DEPTH_PARAMS,
ref_output_fn=ref_depth_output_fn,
ref_search_space=[[0], []],
ref_state={
'elasticity_params': {
'allow_linear_combination': False,
'allow_nested_blocks': False,
'max_block_size': 50,
'min_block_size': 6,
'skipped_blocks': [['DepthBasicConvTestModel/Sequential[branch_with_blocks]/NNCFConv2d[conv0]/conv2d_0',
'DepthBasicConvTestModel/Sequential[branch_with_blocks]/NNCFConv2d[conv1]/conv2d_0']]
},
EDBuilderStateNames.SKIPPED_BLOCKS: BASIC_ELASTIC_DEPTH_PARAMS['skipped_blocks_state'],
EDBuilderStateNames.SKIPPED_BLOCKS_DEPENDENCIES: BASIC_ELASTIC_DEPTH_PARAMS['skipped_blocks_dependencies'],
EDBuilderStateNames.OrdinalIds: None,
}
)
LIST_STATE_AFTER_BUILD_DESCS = [
*COMMON_WIDTH_STATE_DESCS,
COMMON_DEPTH_SUPERNET_DESC,
COMMON_KERNEL_DESC
]
@pytest.mark.parametrize('desc', LIST_STATE_AFTER_BUILD_DESCS, ids=map(str, LIST_STATE_AFTER_BUILD_DESCS))
def test_can_get_builder_state_after_build(desc):
_, builder = desc.build_handler()
actual_state = builder.get_state()
assert actual_state == desc.ref_state
ELASTIC_WIDTH_PARAMS_BB = {'filter_importance': 'L2', **BASIC_ELASTIC_WIDTH_PARAMS}
LIST_STATE_BEFORE_BUILD_DESCS = [
ElasticityDesc(
ElasticityDim.WIDTH,
params=ELASTIC_WIDTH_PARAMS_BB,
ref_state={
SEHBuilderStateNames.ELASTICITY_PARAMS: ELASTIC_WIDTH_PARAMS_BB,
EWBuilderStateNames.GROUPED_NODE_NAMES_TO_PRUNE: []
}
),
ElasticityDesc(
ElasticityDim.KERNEL,
params=BASIC_ELASTIC_KERNEL_PARAMS,
ref_state={
SEHBuilderStateNames.ELASTICITY_PARAMS: BASIC_ELASTIC_KERNEL_PARAMS,
EKBuilderStateNames.NODE_NAMES_TO_MAKE_ELASTIC: []
}
),
COMMON_DEPTH_BASIC_DESC
]
@pytest.mark.parametrize('desc', LIST_STATE_BEFORE_BUILD_DESCS, ids=map(str, LIST_STATE_BEFORE_BUILD_DESCS))
class TestBeforeBuild:
def test_can_get_builder_state_before_build(self, desc: ElasticityDesc):
builder = desc.create_builder()
actual_state = builder.get_state()
assert actual_state == desc.ref_state
def test_output_warning_when_state_overrides_params(self, desc: ElasticityDesc, _nncf_caplog):
old_builder = desc.create_builder_with_config({})
old_state = old_builder.get_state()
new_params = desc.params
new_builder = desc.create_builder_with_config(new_params)
new_builder.load_state(old_state)
record = next(iter(_nncf_caplog.records))
assert record.levelno == logging.WARNING
def test_no_warning_when_state_and_params_are_the_same(self, desc: ElasticityDesc, _nncf_caplog):
old_builder = desc.create_builder()
old_state = old_builder.get_state()
new_params = desc.params.copy()
new_builder = desc.create_builder_with_config(new_params)
new_builder.load_state(old_state)
assert not _nncf_caplog.records
LIST_LOAD_STATE_DESCS = [
COMMON_DEPTH_BASIC_DESC,
*COMMON_WIDTH_STATE_DESCS,
COMMON_KERNEL_DESC
]
@pytest.mark.parametrize('desc', LIST_LOAD_STATE_DESCS, ids=map(str, LIST_LOAD_STATE_DESCS))
def test_can_load_handler_state(desc: ElasticityDesc):
model = desc.model_cls()
move_model_to_cuda_if_available(model)
model_copy = deepcopy(model)
device = next(iter(model.parameters())).device
dummy_input = torch.ones(model.INPUT_SIZE).to(device)
input_size = desc.input_size
if not input_size:
input_size = model.INPUT_SIZE
config = get_empty_config(input_sample_sizes=input_size)
old_nncf_network = create_nncf_network(model, config)
old_builder = desc.create_builder()
old_handler = old_builder.build(old_nncf_network)
elastic_model = build_elastic_model_from_handler(old_nncf_network, old_handler)
old_handler.activate_minimum_subnet()
old_output = elastic_model(dummy_input)
ref_output = desc.ref_output_fn(model, dummy_input)
assert torch.allclose(old_output, ref_output)
new_nncf_network = create_nncf_network(model_copy, config)
builder_state = old_builder.get_state()
new_builder = desc.create_builder_with_config({})
new_builder.load_state(builder_state)
new_handler = new_builder.build(new_nncf_network)
elastic_model = build_elastic_model_from_handler(new_nncf_network, new_handler)
new_handler.activate_minimum_subnet()
new_output = elastic_model(dummy_input)
assert torch.allclose(old_output, new_output)
| true | true |
f713f3f9c3a36c6b39f53d8a182a15163e5b1137 | 7,573 | gyp | Python | gyp/animator.gyp | Android4SAM/platform_external_skia | 7cd90d4eecdba0f40a36945749d40df95d6d641b | [
"BSD-3-Clause"
] | null | null | null | gyp/animator.gyp | Android4SAM/platform_external_skia | 7cd90d4eecdba0f40a36945749d40df95d6d641b | [
"BSD-3-Clause"
] | null | null | null | gyp/animator.gyp | Android4SAM/platform_external_skia | 7cd90d4eecdba0f40a36945749d40df95d6d641b | [
"BSD-3-Clause"
] | 1 | 2020-11-12T05:40:43.000Z | 2020-11-12T05:40:43.000Z | {
'includes': [
'common.gypi',
],
'targets': [
{
'target_name': 'animator',
'type': 'static_library',
'include_dirs': [
'../include/config',
'../include/core',
'../include/effects',
'../include/animator',
'../include/views',
'../include/xml',
'../include/utils',
'../include/images',
'../src/utils',
],
'sources': [
'../include/animator/SkAnimator.h',
'../include/animator/SkAnimatorView.h',
'../src/animator/SkAnimate.h',
'../src/animator/SkAnimateActive.cpp',
'../src/animator/SkAnimateActive.h',
'../src/animator/SkAnimateBase.cpp',
'../src/animator/SkAnimateBase.h',
'../src/animator/SkAnimateField.cpp',
'../src/animator/SkAnimateMaker.cpp',
'../src/animator/SkAnimateMaker.h',
'../src/animator/SkAnimateProperties.h',
'../src/animator/SkAnimateSet.cpp',
'../src/animator/SkAnimateSet.h',
'../src/animator/SkAnimator.cpp',
'../src/animator/SkAnimatorScript.cpp',
'../src/animator/SkAnimatorScript.h',
#'../src/animator/SkAnimatorScript2.cpp', fails on windows
#'../src/animator/SkAnimatorScript2.h',
'../src/animator/SkBoundable.cpp',
'../src/animator/SkBoundable.h',
'../src/animator/SkBuildCondensedInfo.cpp',
#'../src/animator/SkCondensedDebug.cpp', fails on windows
#'../src/animator/SkCondensedRelease.cpp',
'../src/animator/SkDisplayable.cpp',
'../src/animator/SkDisplayable.h',
'../src/animator/SkDisplayAdd.cpp',
'../src/animator/SkDisplayAdd.h',
'../src/animator/SkDisplayApply.cpp',
'../src/animator/SkDisplayApply.h',
'../src/animator/SkDisplayBounds.cpp',
'../src/animator/SkDisplayBounds.h',
'../src/animator/SkDisplayEvent.cpp',
'../src/animator/SkDisplayEvent.h',
'../src/animator/SkDisplayEvents.cpp',
'../src/animator/SkDisplayEvents.h',
'../src/animator/SkDisplayInclude.cpp',
'../src/animator/SkDisplayInclude.h',
'../src/animator/SkDisplayInput.cpp',
'../src/animator/SkDisplayInput.h',
'../src/animator/SkDisplayList.cpp',
'../src/animator/SkDisplayList.h',
'../src/animator/SkDisplayMath.cpp',
'../src/animator/SkDisplayMath.h',
'../src/animator/SkDisplayMovie.cpp',
'../src/animator/SkDisplayMovie.h',
'../src/animator/SkDisplayNumber.cpp',
'../src/animator/SkDisplayNumber.h',
'../src/animator/SkDisplayPost.cpp',
'../src/animator/SkDisplayPost.h',
'../src/animator/SkDisplayRandom.cpp',
'../src/animator/SkDisplayRandom.h',
'../src/animator/SkDisplayScreenplay.cpp',
'../src/animator/SkDisplayScreenplay.h',
'../src/animator/SkDisplayType.cpp',
'../src/animator/SkDisplayType.h',
'../src/animator/SkDisplayTypes.cpp',
'../src/animator/SkDisplayTypes.h',
'../src/animator/SkDisplayXMLParser.cpp',
'../src/animator/SkDisplayXMLParser.h',
'../src/animator/SkDraw3D.cpp',
'../src/animator/SkDraw3D.h',
'../src/animator/SkDrawable.cpp',
'../src/animator/SkDrawable.h',
'../src/animator/SkDrawBitmap.cpp',
'../src/animator/SkDrawBitmap.h',
'../src/animator/SkDrawBlur.cpp',
'../src/animator/SkDrawBlur.h',
'../src/animator/SkDrawClip.cpp',
'../src/animator/SkDrawClip.h',
'../src/animator/SkDrawColor.cpp',
'../src/animator/SkDrawColor.h',
'../src/animator/SkDrawDash.cpp',
'../src/animator/SkDrawDash.h',
'../src/animator/SkDrawDiscrete.cpp',
'../src/animator/SkDrawDiscrete.h',
'../src/animator/SkDrawEmboss.cpp',
'../src/animator/SkDrawEmboss.h',
'../src/animator/SkDrawExtraPathEffect.cpp',
'../src/animator/SkDrawFull.cpp',
'../src/animator/SkDrawFull.h',
'../src/animator/SkDrawGradient.cpp',
'../src/animator/SkDrawGradient.h',
'../src/animator/SkDrawGroup.cpp',
'../src/animator/SkDrawGroup.h',
'../src/animator/SkDrawLine.cpp',
'../src/animator/SkDrawLine.h',
'../src/animator/SkDrawMatrix.cpp',
'../src/animator/SkDrawMatrix.h',
'../src/animator/SkDrawOval.cpp',
'../src/animator/SkDrawOval.h',
'../src/animator/SkDrawPaint.cpp',
'../src/animator/SkDrawPaint.h',
'../src/animator/SkDrawPath.cpp',
'../src/animator/SkDrawPath.h',
'../src/animator/SkDrawPoint.cpp',
'../src/animator/SkDrawPoint.h',
'../src/animator/SkDrawRectangle.cpp',
'../src/animator/SkDrawRectangle.h',
'../src/animator/SkDrawSaveLayer.cpp',
'../src/animator/SkDrawSaveLayer.h',
'../src/animator/SkDrawShader.cpp',
'../src/animator/SkDrawShader.h',
'../src/animator/SkDrawText.cpp',
'../src/animator/SkDrawText.h',
'../src/animator/SkDrawTextBox.cpp',
'../src/animator/SkDrawTextBox.h',
'../src/animator/SkDrawTo.cpp',
'../src/animator/SkDrawTo.h',
'../src/animator/SkDrawTransparentShader.cpp',
'../src/animator/SkDrawTransparentShader.h',
'../src/animator/SkDump.cpp',
'../src/animator/SkDump.h',
'../src/animator/SkExtras.h',
'../src/animator/SkGetCondensedInfo.cpp',
'../src/animator/SkHitClear.cpp',
'../src/animator/SkHitClear.h',
'../src/animator/SkHitTest.cpp',
'../src/animator/SkHitTest.h',
'../src/animator/SkIntArray.h',
'../src/animator/SkMatrixParts.cpp',
'../src/animator/SkMatrixParts.h',
'../src/animator/SkMemberInfo.cpp',
'../src/animator/SkMemberInfo.h',
'../src/animator/SkOpArray.cpp',
'../src/animator/SkOpArray.h',
'../src/animator/SkOperand.h',
'../src/animator/SkOperand2.h',
'../src/animator/SkOperandInterpolator.h',
'../src/animator/SkOperandIterpolator.cpp',
'../src/animator/SkPaintParts.cpp',
'../src/animator/SkPaintParts.h',
'../src/animator/SkParseSVGPath.cpp',
'../src/animator/SkPathParts.cpp',
'../src/animator/SkPathParts.h',
'../src/animator/SkPostParts.cpp',
'../src/animator/SkPostParts.h',
'../src/animator/SkScript.cpp',
'../src/animator/SkScript.h',
'../src/animator/SkScript2.h',
'../src/animator/SkScriptCallBack.h',
'../src/animator/SkScriptDecompile.cpp',
'../src/animator/SkScriptRuntime.cpp',
'../src/animator/SkScriptRuntime.h',
'../src/animator/SkScriptTokenizer.cpp',
'../src/animator/SkSnapshot.cpp',
'../src/animator/SkSnapshot.h',
'../src/animator/SkTDArray_Experimental.h',
'../src/animator/SkTextOnPath.cpp',
'../src/animator/SkTextOnPath.h',
'../src/animator/SkTextToPath.cpp',
'../src/animator/SkTextToPath.h',
'../src/animator/SkTime.cpp',
'../src/animator/SkTypedArray.cpp',
'../src/animator/SkTypedArray.h',
'../src/animator/SkXMLAnimatorWriter.cpp',
'../src/animator/SkXMLAnimatorWriter.h',
],
'direct_dependent_settings': {
'include_dirs': [
'../include/animator',
],
},
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| 39.238342 | 66 | 0.579955 | {
'includes': [
'common.gypi',
],
'targets': [
{
'target_name': 'animator',
'type': 'static_library',
'include_dirs': [
'../include/config',
'../include/core',
'../include/effects',
'../include/animator',
'../include/views',
'../include/xml',
'../include/utils',
'../include/images',
'../src/utils',
],
'sources': [
'../include/animator/SkAnimator.h',
'../include/animator/SkAnimatorView.h',
'../src/animator/SkAnimate.h',
'../src/animator/SkAnimateActive.cpp',
'../src/animator/SkAnimateActive.h',
'../src/animator/SkAnimateBase.cpp',
'../src/animator/SkAnimateBase.h',
'../src/animator/SkAnimateField.cpp',
'../src/animator/SkAnimateMaker.cpp',
'../src/animator/SkAnimateMaker.h',
'../src/animator/SkAnimateProperties.h',
'../src/animator/SkAnimateSet.cpp',
'../src/animator/SkAnimateSet.h',
'../src/animator/SkAnimator.cpp',
'../src/animator/SkAnimatorScript.cpp',
'../src/animator/SkAnimatorScript.h',
'../src/animator/SkBoundable.cpp',
'../src/animator/SkBoundable.h',
'../src/animator/SkBuildCondensedInfo.cpp',
'../src/animator/SkDisplayable.cpp',
'../src/animator/SkDisplayable.h',
'../src/animator/SkDisplayAdd.cpp',
'../src/animator/SkDisplayAdd.h',
'../src/animator/SkDisplayApply.cpp',
'../src/animator/SkDisplayApply.h',
'../src/animator/SkDisplayBounds.cpp',
'../src/animator/SkDisplayBounds.h',
'../src/animator/SkDisplayEvent.cpp',
'../src/animator/SkDisplayEvent.h',
'../src/animator/SkDisplayEvents.cpp',
'../src/animator/SkDisplayEvents.h',
'../src/animator/SkDisplayInclude.cpp',
'../src/animator/SkDisplayInclude.h',
'../src/animator/SkDisplayInput.cpp',
'../src/animator/SkDisplayInput.h',
'../src/animator/SkDisplayList.cpp',
'../src/animator/SkDisplayList.h',
'../src/animator/SkDisplayMath.cpp',
'../src/animator/SkDisplayMath.h',
'../src/animator/SkDisplayMovie.cpp',
'../src/animator/SkDisplayMovie.h',
'../src/animator/SkDisplayNumber.cpp',
'../src/animator/SkDisplayNumber.h',
'../src/animator/SkDisplayPost.cpp',
'../src/animator/SkDisplayPost.h',
'../src/animator/SkDisplayRandom.cpp',
'../src/animator/SkDisplayRandom.h',
'../src/animator/SkDisplayScreenplay.cpp',
'../src/animator/SkDisplayScreenplay.h',
'../src/animator/SkDisplayType.cpp',
'../src/animator/SkDisplayType.h',
'../src/animator/SkDisplayTypes.cpp',
'../src/animator/SkDisplayTypes.h',
'../src/animator/SkDisplayXMLParser.cpp',
'../src/animator/SkDisplayXMLParser.h',
'../src/animator/SkDraw3D.cpp',
'../src/animator/SkDraw3D.h',
'../src/animator/SkDrawable.cpp',
'../src/animator/SkDrawable.h',
'../src/animator/SkDrawBitmap.cpp',
'../src/animator/SkDrawBitmap.h',
'../src/animator/SkDrawBlur.cpp',
'../src/animator/SkDrawBlur.h',
'../src/animator/SkDrawClip.cpp',
'../src/animator/SkDrawClip.h',
'../src/animator/SkDrawColor.cpp',
'../src/animator/SkDrawColor.h',
'../src/animator/SkDrawDash.cpp',
'../src/animator/SkDrawDash.h',
'../src/animator/SkDrawDiscrete.cpp',
'../src/animator/SkDrawDiscrete.h',
'../src/animator/SkDrawEmboss.cpp',
'../src/animator/SkDrawEmboss.h',
'../src/animator/SkDrawExtraPathEffect.cpp',
'../src/animator/SkDrawFull.cpp',
'../src/animator/SkDrawFull.h',
'../src/animator/SkDrawGradient.cpp',
'../src/animator/SkDrawGradient.h',
'../src/animator/SkDrawGroup.cpp',
'../src/animator/SkDrawGroup.h',
'../src/animator/SkDrawLine.cpp',
'../src/animator/SkDrawLine.h',
'../src/animator/SkDrawMatrix.cpp',
'../src/animator/SkDrawMatrix.h',
'../src/animator/SkDrawOval.cpp',
'../src/animator/SkDrawOval.h',
'../src/animator/SkDrawPaint.cpp',
'../src/animator/SkDrawPaint.h',
'../src/animator/SkDrawPath.cpp',
'../src/animator/SkDrawPath.h',
'../src/animator/SkDrawPoint.cpp',
'../src/animator/SkDrawPoint.h',
'../src/animator/SkDrawRectangle.cpp',
'../src/animator/SkDrawRectangle.h',
'../src/animator/SkDrawSaveLayer.cpp',
'../src/animator/SkDrawSaveLayer.h',
'../src/animator/SkDrawShader.cpp',
'../src/animator/SkDrawShader.h',
'../src/animator/SkDrawText.cpp',
'../src/animator/SkDrawText.h',
'../src/animator/SkDrawTextBox.cpp',
'../src/animator/SkDrawTextBox.h',
'../src/animator/SkDrawTo.cpp',
'../src/animator/SkDrawTo.h',
'../src/animator/SkDrawTransparentShader.cpp',
'../src/animator/SkDrawTransparentShader.h',
'../src/animator/SkDump.cpp',
'../src/animator/SkDump.h',
'../src/animator/SkExtras.h',
'../src/animator/SkGetCondensedInfo.cpp',
'../src/animator/SkHitClear.cpp',
'../src/animator/SkHitClear.h',
'../src/animator/SkHitTest.cpp',
'../src/animator/SkHitTest.h',
'../src/animator/SkIntArray.h',
'../src/animator/SkMatrixParts.cpp',
'../src/animator/SkMatrixParts.h',
'../src/animator/SkMemberInfo.cpp',
'../src/animator/SkMemberInfo.h',
'../src/animator/SkOpArray.cpp',
'../src/animator/SkOpArray.h',
'../src/animator/SkOperand.h',
'../src/animator/SkOperand2.h',
'../src/animator/SkOperandInterpolator.h',
'../src/animator/SkOperandIterpolator.cpp',
'../src/animator/SkPaintParts.cpp',
'../src/animator/SkPaintParts.h',
'../src/animator/SkParseSVGPath.cpp',
'../src/animator/SkPathParts.cpp',
'../src/animator/SkPathParts.h',
'../src/animator/SkPostParts.cpp',
'../src/animator/SkPostParts.h',
'../src/animator/SkScript.cpp',
'../src/animator/SkScript.h',
'../src/animator/SkScript2.h',
'../src/animator/SkScriptCallBack.h',
'../src/animator/SkScriptDecompile.cpp',
'../src/animator/SkScriptRuntime.cpp',
'../src/animator/SkScriptRuntime.h',
'../src/animator/SkScriptTokenizer.cpp',
'../src/animator/SkSnapshot.cpp',
'../src/animator/SkSnapshot.h',
'../src/animator/SkTDArray_Experimental.h',
'../src/animator/SkTextOnPath.cpp',
'../src/animator/SkTextOnPath.h',
'../src/animator/SkTextToPath.cpp',
'../src/animator/SkTextToPath.h',
'../src/animator/SkTime.cpp',
'../src/animator/SkTypedArray.cpp',
'../src/animator/SkTypedArray.h',
'../src/animator/SkXMLAnimatorWriter.cpp',
'../src/animator/SkXMLAnimatorWriter.h',
],
'direct_dependent_settings': {
'include_dirs': [
'../include/animator',
],
},
},
],
}
| true | true |
f713f41205149d08465b67431cb2f925a8e7cf98 | 221,612 | py | Python | jax/lax/lax.py | baba1587/jax | cb77f2a22de49e85da93f43b7dc448aa238d5207 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-18T23:57:13.000Z | 2020-05-18T23:57:13.000Z | jax/lax/lax.py | baba1587/jax | cb77f2a22de49e85da93f43b7dc448aa238d5207 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/lax/lax.py | baba1587/jax | cb77f2a22de49e85da93f43b7dc448aa238d5207 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import builtins
import collections
import enum
import functools
import itertools
import operator
import string
from typing import (Any, Callable, List, NamedTuple, Optional, Sequence, Union,
Tuple, Type)
import warnings
import numpy as onp
from ..util import partial, prod
from .. import core
from .. import ad_util
from .. import api
from .. import linear_util as lu
from .. import dtypes
from .. import lazy
from .. import lib
from ..config import flags
from ..core import Primitive
from ..abstract_arrays import (UnshapedArray, ShapedArray, ConcreteArray,
AbstractToken, array_types, make_shaped_array,
raise_to_shaped, abstract_token, canonicalize_shape)
from ..interpreters import partial_eval as pe
from ..interpreters import xla
from ..interpreters import pxla
from ..interpreters import ad
from ..interpreters import batching
from ..interpreters import masking
from ..util import curry, cache, safe_zip, unzip2, prod
from ..tree_util import build_tree, tree_unflatten, tree_map
from ..lib import pytree
from ..lib import xla_bridge
from ..lib import xla_client
# Short module-local aliases for the XLA bridge/client modules imported above.
xb = xla_bridge
xc = xla_client
xops = xla_client.ops

# Command-line flag namespace (from jax's config module).
FLAGS = flags.FLAGS
_max = builtins.max
_min = builtins.max
_reduce = functools.reduce
# Loose type aliases used in the signatures throughout this module. They are
# purely documentary: nothing is enforced statically (Array/DType are `Any`).
Array = Any
DType = Any
Shape = Sequence[int]
@cache()
def broadcast_shapes(*shapes):
  """Returns the shape that results from NumPy broadcasting of `shapes`."""
  if len(shapes) == 1:
    return shapes[0]
  # Left-pad every shape with 1s so they all share the same rank, then
  # broadcast dimension-wise.
  rank = _max(len(s) for s in shapes)
  padded = onp.array([(1,) * (rank - len(s)) + s for s in shapes])
  # A zero extent in any input forces a zero-sized result dimension.
  dim_has_zero = onp.any(padded == 0, axis=0)
  result_shape = onp.where(dim_has_zero, 0, onp.max(padded, axis=0))
  # Each padded dimension must either match the result or be 1.
  if not onp.all((padded == result_shape) | (padded == 1)):
    raise ValueError("Incompatible shapes for broadcasting: {}"
                     .format(tuple(map(tuple, padded))))
  return canonicalize_shape(result_shape)
def _identity(x): return x
### traceables
# Traceable elementwise unary math ops. Each wrapper simply binds the
# corresponding primitive (`*_p`); shape/dtype checking is done by the
# primitives' rules, not here.

def neg(x: Array) -> Array:
  r"""Elementwise negation: :math:`-x`."""
  return neg_p.bind(x)

def sign(x: Array) -> Array:
  r"""Elementwise sign.

  For floating-point inputs, returns
  :math:`\mathrm{sign}(x) = \begin{cases}
  -1 & x < 0\\
  -0 & x = -0\\
  \mathit{NaN} & x = \mathit{NaN}\\
  +0 & x = +0\\
  1 & x > 0
  \end{cases}`

  For signed integer inputs, returns
  :math:`\mathrm{sign}(x) = \begin{cases}
  -1 & x < 0\\
  0 & x = 0\\
  1 & x > 0
  \end{cases}`

  For complex inputs, returns the complex phase, i.e.
  :math:`\mathrm{sign}(x) = \frac{x}{|x|}`.
  """
  return sign_p.bind(x)

def nextafter(x1: Array, x2: Array) -> Array:
  r"""Returns the next representable value after `x1` in the direction of `x2`."""
  # NOTE(review): `_brcast` appears to broadcast each argument against the
  # other before binding -- confirm against its definition elsewhere in this
  # module.
  return nextafter_p.bind(_brcast(x1, x2), _brcast(x2, x1))

def floor(x: Array) -> Array:
  r"""Elementwise floor: :math:`\left\lfloor x \right\rfloor`."""
  return floor_p.bind(x)

def ceil(x: Array) -> Array:
  r"""Elementwise ceiling: :math:`\left\lceil x \right\rceil`."""
  return ceil_p.bind(x)

def round(x: Array) -> Array:
  r"""Elementwise round.

  Rounds values to the nearest integer. Halfway values (e.g., `0.5`) are rounded
  away from zero."""
  return round_p.bind(x)

def is_finite(x: Array) -> Array:
  r"""Elementwise :math:`\mathrm{isfinite}`.

  For each element x returns `True` if and only if x is not :math:`\pm\infty` or
  :math:`\mathit{NaN}`.
  """
  return is_finite_p.bind(x)

def exp(x: Array) -> Array:
  r"""Elementwise exponential: :math:`e^x`."""
  return exp_p.bind(x)

def expm1(x: Array) -> Array:
  r"""Elementwise :math:`e^{x} - 1`."""
  return expm1_p.bind(x)

def log(x: Array) -> Array:
  r"""Elementwise natural logarithm: :math:`\mathrm{log}(x)`."""
  return log_p.bind(x)

def log1p(x: Array) -> Array:
  r"""Elementwise :math:`\mathrm{log}(1 + x)`."""
  return log1p_p.bind(x)

def tanh(x: Array) -> Array:
  r"""Elementwise hyperbolic tangent: :math:`\mathrm{tanh}(x)`."""
  return tanh_p.bind(x)

def sin(x: Array) -> Array:
  r"""Elementwise sine: :math:`\mathrm{sin}(x)`."""
  return sin_p.bind(x)

def cos(x: Array) -> Array:
  r"""Elementwise cosine: :math:`\mathrm{cos}(x)`."""
  return cos_p.bind(x)

def atan2(x: Array, y: Array) -> Array:
  r"""Elementwise arc tangent of two variables:
    :math:`\mathrm{atan}({x \over y})`."""
  return atan2_p.bind(x, y)

def betainc(a: Array, b: Array, x: Array) -> Array:
  r"""Elementwise regularized incomplete beta integral."""
  return regularized_incomplete_beta_p.bind(a, b, x)

def lgamma(x: Array) -> Array:
  r"""Elementwise log gamma: :math:`\mathrm{log}(\Gamma(x))`."""
  return lgamma_p.bind(x)

def digamma(x: Array) -> Array:
  r"""Elementwise digamma: :math:`\psi(x)`."""
  return digamma_p.bind(x)

def igamma(a: Array, x: Array) -> Array:
  r"""Elementwise regularized incomplete gamma function."""
  return igamma_p.bind(a, x)

def igammac(a: Array, x: Array) -> Array:
  r"""Elementwise complementary regularized incomplete gamma function."""
  return igammac_p.bind(a, x)

def igamma_grad_a(a: Array, x: Array) -> Array:
  r"""Elementwise derivative of the regularized incomplete gamma function
  with respect to its first argument `a`."""
  return igamma_grad_a_p.bind(a, x)

def bessel_i0e(x: Array) -> Array:
  r"""Exponentially scaled modified Bessel function of order 0:
  :math:`\mathrm{i0e}(x) = e^{-|x|} \mathrm{i0}(x)`
  """
  return bessel_i0e_p.bind(x)

def bessel_i1e(x: Array) -> Array:
  r"""Exponentially scaled modified Bessel function of order 1:
  :math:`\mathrm{i1e}(x) = e^{-|x|} \mathrm{i1}(x)`
  """
  return bessel_i1e_p.bind(x)
def erf(x: Array) -> Array:
  r"""Elementwise error function: :math:`\mathrm{erf}(x)`."""
  return erf_p.bind(x)

def erfc(x: Array) -> Array:
  r"""Elementwise complementary error function:
  :math:`\mathrm{erfc}(x) = 1 - \mathrm{erf}(x)`."""
  return erfc_p.bind(x)

def erf_inv(x: Array) -> Array:
  r"""Elementwise inverse error function: :math:`\mathrm{erf}^{-1}(x)`."""
  return erf_inv_p.bind(x)

def real(x: Array) -> Array:
  r"""Elementwise extract real part: :math:`\mathrm{Re}(x)`.

  Returns the real part of a complex number.
  """
  return real_p.bind(x)

def imag(x: Array) -> Array:
  r"""Elementwise extract imaginary part: :math:`\mathrm{Im}(x)`.

  Returns the imaginary part of a complex number.
  """
  return imag_p.bind(x)

def complex(x: Array, y: Array) -> Array:
  r"""Elementwise make complex number: :math:`x + jy`.

  Builds a complex number from real and imaginary parts.
  """
  # NOTE(review): `_brcast` presumably broadcasts the two parts against each
  # other before binding -- confirm against its definition in this module.
  return complex_p.bind(_brcast(x, y), _brcast(y, x))

def conj(x: Array) -> Array:
  r"""Elementwise complex conjugate function: :math:`\overline{x}`."""
  # The operand's dtype is passed through as a parameter of the primitive.
  return conj_p.bind(x, input_dtype=_dtype(x))

def abs(x: Array) -> Array:
  r"""Elementwise absolute value: :math:`|x|`."""
  return abs_p.bind(x)

def pow(x: Array, y: Array) -> Array:
  r"""Elementwise power: :math:`x^y`.

  See also `integer_pow` for the case of a static integer exponent.
  """
  return pow_p.bind(x, y)
def integer_pow(x: Array, y: int) -> Array:
  r"""Elementwise power: :math:`x^y`, where :math:`y` is a fixed integer."""
  # Handle the trivial static exponents without binding a primitive.
  if y == 1:
    return x
  if y == 0:
    return _ones(x)
  return integer_pow_p.bind(x, y=y)
def sqrt(x: Array) -> Array:
  r"""Elementwise square root: :math:`\sqrt{x}`."""
  return sqrt_p.bind(x)

def rsqrt(x: Array) -> Array:
  r"""Elementwise reciprocal square root: :math:`1 \over \sqrt{x}`."""
  return rsqrt_p.bind(x)

def bitwise_not(x: Array) -> Array:
  r"""Elementwise NOT: :math:`\neg x`."""
  return not_p.bind(x)

def bitwise_and(x: Array, y: Array) -> Array:
  r"""Elementwise AND: :math:`x \wedge y`."""
  return and_p.bind(x, y)

def bitwise_or(x: Array, y: Array) -> Array:
  r"""Elementwise OR: :math:`x \vee y`."""
  return or_p.bind(x, y)

def bitwise_xor(x: Array, y: Array) -> Array:
  r"""Elementwise exclusive OR: :math:`x \oplus y`."""
  return xor_p.bind(x, y)

def population_count(x: Array) -> Array:
  r"""Elementwise popcount, count the number of set bits in each element."""
  return population_count_p.bind(x)
# Traceable elementwise binary arithmetic, shift, and comparison ops; each
# binds its primitive directly with no argument preprocessing.

def add(x: Array, y: Array) -> Array:
  r"""Elementwise addition: :math:`x + y`."""
  return add_p.bind(x, y)

def sub(x: Array, y: Array) -> Array:
  r"""Elementwise subtraction: :math:`x - y`."""
  return sub_p.bind(x, y)

def mul(x: Array, y: Array) -> Array:
  r"""Elementwise multiplication: :math:`x \times y`."""
  return mul_p.bind(x, y)

def div(x: Array, y: Array) -> Array:
  r"""Elementwise division: :math:`x \over y`."""
  return div_p.bind(x, y)

def rem(x: Array, y: Array) -> Array:
  r"""Elementwise remainder: :math:`x \bmod y`."""
  return rem_p.bind(x, y)

# Note: `max` and `min` below shadow the Python builtins for the rest of this
# module; the builtins remain available as `_max`/`_min` defined above.
def max(x: Array, y: Array) -> Array:
  r"""Elementwise maximum: :math:`\mathrm{max}(x, y)`

  For complex numbers, uses a lexicographic comparison on the
  `(real, imaginary)` pairs."""
  return max_p.bind(x, y)

def min(x: Array, y: Array) -> Array:
  r"""Elementwise minimum: :math:`\mathrm{min}(x, y)`

  For complex numbers, uses a lexicographic comparison on the
  `(real, imaginary)` pairs."""
  return min_p.bind(x, y)

def shift_left(x: Array, y: Array) -> Array:
  r"""Elementwise left shift: :math:`x \ll y`."""
  return shift_left_p.bind(x, y)

def shift_right_arithmetic(x: Array, y: Array) -> Array:
  r"""Elementwise arithmetic right shift: :math:`x \gg y`."""
  return shift_right_arithmetic_p.bind(x, y)

def shift_right_logical(x: Array, y: Array) -> Array:
  r"""Elementwise logical right shift: :math:`x \gg y`."""
  return shift_right_logical_p.bind(x, y)

def eq(x: Array, y: Array) -> Array:
  r"""Elementwise equals: :math:`x = y`."""
  return eq_p.bind(x, y)

def ne(x: Array, y: Array) -> Array:
  r"""Elementwise not-equals: :math:`x \neq y`."""
  return ne_p.bind(x, y)

def ge(x: Array, y: Array) -> Array:
  r"""Elementwise greater-than-or-equals: :math:`x \geq y`."""
  return ge_p.bind(x, y)

def gt(x: Array, y: Array) -> Array:
  r"""Elementwise greater-than: :math:`x > y`."""
  return gt_p.bind(x, y)

def le(x: Array, y: Array) -> Array:
  r"""Elementwise less-than-or-equals: :math:`x \leq y`."""
  return le_p.bind(x, y)

def lt(x: Array, y: Array) -> Array:
  r"""Elementwise less-than: :math:`x < y`."""
  return lt_p.bind(x, y)
def convert_element_type(operand: Array, new_dtype: DType) -> Array:
  """Elementwise cast.

  Wraps XLA's `ConvertElementType
  <https://www.tensorflow.org/xla/operation_semantics#convertelementtype>`_
  operator, which performs an elementwise conversion from one type to another.
  Similar to a C++ `static_cast`.

  Args:
    operand: an array or scalar value to be cast
    new_dtype: the new type. Should be a NumPy type.

  Returns:
    An array with the same shape as `operand`, cast elementwise to `new_dtype`.
  """
  new_dtype = dtypes.canonicalize_dtype(new_dtype)
  # Avoids dropping precision by casting Python scalars to the default Jax
  # type. If we passed a Python scalar directly to the bind call below, it is
  # cast to the default type as part of the calling convention.
  if type(operand) in dtypes.python_scalar_dtypes:
    operand = onp.asarray(operand, new_dtype)
  old_dtype = dtypes.canonicalize_dtype(_dtype(operand))
  # No-op shortcut: if the canonicalized dtypes already agree, return the
  # operand unchanged rather than binding a primitive.
  if old_dtype == new_dtype:
    return operand
  # Mirror NumPy's behavior: casting complex -> real silently drops the
  # imaginary part, so emit a ComplexWarning pointing at the caller.
  if (dtypes.issubdtype(old_dtype, onp.complexfloating) and
      not dtypes.issubdtype(new_dtype, onp.complexfloating)):
    msg = "Casting complex values to real discards the imaginary part"
    warnings.warn(msg, onp.ComplexWarning, stacklevel=2)
  return convert_element_type_p.bind(
      operand, new_dtype=new_dtype, old_dtype=old_dtype)
def bitcast_convert_type(operand: Array, new_dtype: DType) -> Array:
  """Elementwise bitcast.

  Wraps XLA's `BitcastConvertType
  <https://www.tensorflow.org/xla/operation_semantics#bitcastconverttype>`_
  operator, which reinterprets the bits of each element as another type. The
  bitwidth of the source and destination types must match.

  Args:
    operand: an array or scalar value to be cast
    new_dtype: the new type. Should be a NumPy type.

  Returns:
    An array with the same shape as `operand`, bitcast elementwise to
    `new_dtype`.
  """
  new_dtype = dtypes.canonicalize_dtype(new_dtype)
  # Bitcasting to the same dtype is the identity.
  if _dtype(operand) == new_dtype:
    return operand
  return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype)
def clamp(min: Array, x: Array, max: Array) -> Array:
  r"""Elementwise clamp.

  Note the argument order is `(min, x, max)`, mirroring XLA's Clamp.

  Returns :math:`\mathrm{clamp}(x) = \begin{cases}
  \mathit{min} & \text{if } x < \mathit{min},\\
  \mathit{max} & \text{if } x > \mathit{max},\\
  x & \text{otherwise}
  \end{cases}`.
  """
  return clamp_p.bind(min, x, max)
def concatenate(operands: Sequence[Array], dimension: int) -> Array:
  """Concatenates a sequence of arrays along `dimension`.

  Wraps XLA's `Concatenate
  <https://www.tensorflow.org/xla/operation_semantics#concatenate>`_
  operator.

  Args:
    operands: a sequence of arrays to concatenate. The arrays must have equal
      shapes, except in the `dimension` axis.
    dimension: the dimension along which to concatenate the arrays.

  Returns:
    An array containing the concatenation.
  """
  # The operands are passed to the primitive as varargs.
  return concatenate_p.bind(*operands, dimension=dimension)
# XLA's precision enum, re-exported for `precision=` arguments in this module.
Precision = xla_client.PrecisionConfig.Precision
# Patch __str__ so printing a Precision value shows its member name.
Precision.__str__ = lambda precision: precision.name
# Annotation alias for `precision=` parameters (a Precision enum value).
PrecisionType = Any
class ConvDimensionNumbers(NamedTuple):
  """Describes batch, spatial, and feature dimensions of a convolution.

  Args:
    lhs_spec: a tuple of nonnegative integer dimension numbers containing
      `(batch dimension, feature dimension, spatial dimensions...)`.
    rhs_spec: a tuple of nonnegative integer dimension numbers containing
      `(out feature dimension, in feature dimension, spatial dimensions...)`.
    out_spec: a tuple of nonnegative integer dimension numbers containing
      `(batch dimension, feature dimension, spatial dimensions...)`.
  """
  lhs_spec: Sequence[int]  # input: (batch, feature, *spatial)
  rhs_spec: Sequence[int]  # kernel: (out feature, in feature, *spatial)
  out_spec: Sequence[int]  # output: (batch, feature, *spatial)
# Accepted forms for conv_general_dilated's `dimension_numbers`: None (use the
# default), a ConvDimensionNumbers, or a string triple like ('NCHW','OIHW','NCHW').
ConvGeneralDilatedDimensionNumbers = Union[
  None, ConvDimensionNumbers, Tuple[str, str, str]]
def conv_general_dilated(
  lhs: Array, rhs: Array, window_strides: Sequence[int],
  padding: Union[str, Sequence[Tuple[int, int]]],
  lhs_dilation: Optional[Sequence[int]] = None,
  rhs_dilation: Optional[Sequence[int]] = None,
  dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
  feature_group_count: int = 1, batch_group_count: int = 1,
  precision: Optional[PrecisionType] = None) -> Array:
  """General n-dimensional convolution operator, with optional dilation.

  Wraps XLA's `Conv
  <https://www.tensorflow.org/xla/operation_semantics#conv_convolution>`_
  operator.

  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    window_strides: a sequence of `n` integers, representing the inter-window
      strides.
    padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
      `n` `(low, high)` integer pairs that give the padding to apply before and
      after each spatial dimension.
    lhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
      is also known as transposed convolution.
    rhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
      is also known as atrous convolution.
    dimension_numbers: either `None`, a `ConvDimensionNumbers` object, or
      a 3-tuple `(lhs_spec, rhs_spec, out_spec)`, where each element is a string
      of length `n+2`.
    feature_group_count: integer, default 1. See XLA HLO docs.
    batch_group_count: integer, default 1. See XLA HLO docs.
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.

  Returns:
    An array containing the convolution result.

  In the string case of `dimension_numbers`, each character identifies by
  position:

  - the batch dimensions in `lhs`, `rhs`, and the output with the character
    'N',
  - the feature dimensions in `lhs` and the output with the character 'C',
  - the input and output feature dimensions in rhs with the characters 'I'
    and 'O' respectively, and
  - spatial dimension correspondences between lhs, rhs, and the output using
    any distinct characters.

  For example, to indicate dimension numbers consistent with the `conv` function
  with two spatial dimensions, one could use `('NCHW', 'OIHW', 'NCHW')`. As
  another example, to indicate dimension numbers consistent with the TensorFlow
  Conv2D operation, one could use `('NHWC', 'HWIO', 'NHWC')`. When using the
  latter form of convolution dimension specification, window strides are
  associated with spatial dimension character labels according to the order in
  which the labels appear in the `rhs_spec` string, so that `window_strides[0]`
  is matched with the dimension corresponding to the first character
  appearing in rhs_spec that is not `'I'` or `'O'`.

  If `dimension_numbers` is `None`, the default is `('NCHW', 'OIHW', 'NCHW')`
  (for a 2D convolution).
  """
  dnums: ConvDimensionNumbers
  # Resolve the three accepted dimension_numbers forms into a concrete
  # ConvDimensionNumbers object.
  dnums = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
  if lhs_dilation is None:
    lhs_dilation = (1,) * (lhs.ndim - 2)
  # Non-unit lhs_dilation means a transposed convolution, for which the
  # 'SAME'/'VALID' string paddings are ambiguous — reject that combination.
  elif isinstance(padding, str) and not len(lhs_dilation) == lhs_dilation.count(1):
    raise ValueError(
        "String padding is not implemented for transposed convolution "
        "using this op. Please either exactly specify the required padding or "
        "use conv_transpose.")
  if rhs_dilation is None:
    rhs_dilation = (1,) * (rhs.ndim - 2)
  if isinstance(padding, str):
    # Convert 'SAME'/'VALID' into explicit (low, high) pairs, accounting for
    # the kernel's effective (dilated) spatial extent.
    lhs_perm, rhs_perm, _ = dnums
    rhs_shape = onp.take(rhs.shape, rhs_perm)[2:]
    effective_rhs_shape = [(k-1) * r + 1 for k, r in zip(rhs_shape, rhs_dilation)]
    padding = padtype_to_pads(
        onp.take(lhs.shape, lhs_perm)[2:], effective_rhs_shape,
        window_strides, padding)
  return conv_general_dilated_p.bind(
      lhs, rhs, window_strides=tuple(window_strides), padding=tuple(padding),
      lhs_dilation=tuple(lhs_dilation), rhs_dilation=tuple(rhs_dilation),
      dimension_numbers=dnums,
      feature_group_count=feature_group_count,
      batch_group_count=batch_group_count,
      lhs_shape=lhs.shape, rhs_shape=rhs.shape,
      precision=_canonicalize_precision(precision))
def dot(lhs: Array, rhs: Array, precision: Optional[PrecisionType] = None) -> Array:
  """Vector/vector, matrix/vector, and matrix/matrix multiplication.

  Wraps XLA's `Dot
  <https://www.tensorflow.org/xla/operation_semantics#dot>`_
  operator.

  For more general contraction, see the `dot_general` operator.

  Args:
    lhs: an array of rank 1 or 2.
    rhs: an array of rank 1 or 2.
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.

  Returns:
    An array containing the product.
  """
  ranks_ok = 1 <= lhs.ndim <= 2 and 1 <= rhs.ndim <= 2
  if not (ranks_ok and lhs.shape[-1] == rhs.shape[0]):
    raise TypeError("Incompatible shapes for dot: got {} and {}.".format(
        lhs.shape, rhs.shape))
  # Contract lhs's last dimension against rhs's first; no batch dimensions.
  contracting = (((lhs.ndim - 1,), (0,)), ((), ()))
  return dot_general(lhs, rhs, contracting, precision=precision)
# dot_general dimension numbers:
# ((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims, rhs_batch_dims)).
DotDimensionNumbers = Tuple[Tuple[Sequence[int], Sequence[int]],
                            Tuple[Sequence[int], Sequence[int]]]
def dot_general(lhs: Array, rhs: Array, dimension_numbers: DotDimensionNumbers,
                precision: Optional[PrecisionType] = None) -> Array:
  """More general contraction operator.

  Wraps XLA's `DotGeneral
  <https://www.tensorflow.org/xla/operation_semantics#dotgeneral>`_
  operator.

  Args:
    lhs: an array
    rhs: an array
    dimension_numbers: a tuple of tuples of the form
      `((lhs_contracting_dims, rhs_contracting_dims),
      (lhs_batch_dims, rhs_batch_dims))`
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.

  Returns:
    An array containing the result.
  """
  # Normalize the (possibly list-valued) dimension numbers to nested tuples.
  contract_dims_seq, batch_dims_seq = dimension_numbers
  contract_dims = tuple(map(lambda x: tuple(x), contract_dims_seq))
  batch_dims = tuple(map(lambda x: tuple(x), batch_dims_seq))
  if not dtypes.issubdtype(lhs.dtype, onp.inexact):
    # TODO(b/134526360): XLA doesn't support bool or integer dots, so we emit a
    # sum of products instead.
    lhs_contract_dims, rhs_contract_dims = contract_dims
    lhs_batch_dims, rhs_batch_dims = batch_dims
    lhs_noncontract_dims = tuple(sorted(
      set(range(onp.ndim(lhs))) - set(lhs_batch_dims) - set(lhs_contract_dims)))
    rhs_noncontract_dims = tuple(sorted(
      set(range(onp.ndim(rhs))) - set(rhs_batch_dims) - set(rhs_contract_dims)))
    # Reorder both operands to (batch, noncontracting, contracting).
    lhs = transpose(lhs,
                    lhs_batch_dims + lhs_noncontract_dims + lhs_contract_dims)
    rhs = transpose(rhs,
                    rhs_batch_dims + rhs_noncontract_dims + rhs_contract_dims)
    # Insert singleton axes so the two operands broadcast against each other:
    # lhs gets size-1 slots where rhs's noncontracting dims go, and vice versa.
    new_lhs_shape = onp.insert(onp.array(onp.shape(lhs), dtype=onp.int64),
                               len(lhs_batch_dims) + len(lhs_noncontract_dims),
                               (1,) * len(rhs_noncontract_dims))
    new_rhs_shape = onp.insert(onp.array(onp.shape(rhs), dtype=onp.int64),
                               len(lhs_batch_dims),
                               (1,) * len(lhs_noncontract_dims))
    lhs = reshape(lhs, new_lhs_shape)
    rhs = reshape(rhs, new_rhs_shape)
    out_ndim = (len(lhs_batch_dims) + len(lhs_noncontract_dims) +
                len(rhs_noncontract_dims))
    # For booleans, AND/OR play the roles of multiply/add.
    op_product = bitwise_and if lhs.dtype == onp.bool_ else mul
    op_sum = bitwise_or if lhs.dtype == onp.bool_ else add
    # Reduce out the (trailing) contracting dimensions.
    return reduce(op_product(lhs, rhs), _zero(lhs), op_sum,
                  tuple(range(out_ndim, out_ndim + len(lhs_contract_dims))))
  return dot_general_p.bind(lhs, rhs,
                            dimension_numbers=(contract_dims, batch_dims),
                            precision=_canonicalize_precision(precision))
def broadcast(operand: Array, sizes: Sequence[int]) -> Array:
  """Broadcasts an array, adding new major dimensions.

  Wraps XLA's `Broadcast
  <https://www.tensorflow.org/xla/operation_semantics#broadcast>`_
  operator.

  Args:
    operand: an array
    sizes: a sequence of integers, giving the sizes of new major dimensions
      to add.

  Returns:
    An array containing the result.
  """
  n_new = len(sizes)
  old_shape = onp.shape(operand)
  # Existing dimensions keep their relative order after the new leading ones.
  broadcast_dims = tuple(range(n_new, n_new + len(old_shape)))
  return broadcast_in_dim(operand, tuple(sizes) + old_shape, broadcast_dims)
def broadcast_in_dim(operand: Array, shape: Shape,
                     broadcast_dimensions: Sequence[int]) -> Array:
  """Wraps XLA's `BroadcastInDim
  <https://www.tensorflow.org/xla/operation_semantics#broadcastindim>`_
  operator.

  Args:
    operand: the array to broadcast.
    shape: the target shape.
    broadcast_dimensions: for each dimension of `operand`, the dimension of
      `shape` it maps to.
  """
  # Validate the arguments and resolve the target shape up front.
  shape = _broadcast_in_dim_shape_rule(
    operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
  # No-op when the operand already has the target rank and there are no
  # broadcast dimensions to map.
  if onp.ndim(operand) == len(shape) and not len(broadcast_dimensions):
    return operand
  return broadcast_in_dim_p.bind(
      operand, shape=tuple(shape),
      broadcast_dimensions=tuple(broadcast_dimensions))
def broadcast_to_rank(x: Array, rank: int) -> Array:
  """Adds leading dimensions of ``1`` to give ``x`` rank ``rank``.

  `rank` must be at least `x.ndim`.
  """
  return broadcast(x, (1,) * (rank - x.ndim))
def reshape(operand: Array, new_sizes: Shape,
            dimensions: Optional[Sequence[int]] = None) -> Array:
  """Wraps XLA's `Reshape
  <https://www.tensorflow.org/xla/operation_semantics#reshape>`_
  operator.

  Args:
    operand: the array to reshape.
    new_sizes: the target shape.
    dimensions: optional permutation to apply to `operand`'s dimensions before
      reshaping; `None` or the identity permutation means no transpose.
  """
  new_sizes = canonicalize_shape(new_sizes)  # TODO
  new_sizes = tuple(new_sizes)
  same_shape = onp.shape(operand) == new_sizes
  same_dims = dimensions is None or tuple(dimensions) == tuple(range(onp.ndim(operand)))
  # Identity reshape fast path. Note scalars (shape ()) never take it because
  # `onp.shape(operand)` is falsy for them.
  if onp.shape(operand) and same_shape and same_dims:
    return operand
  else:
    return reshape_p.bind(
      operand, new_sizes=new_sizes,
      dimensions=None if dimensions is None or same_dims else tuple(dimensions))
def pad(operand: Array, padding_value: Array,
        padding_config: Sequence[Tuple[int, int, int]]) -> Array:
  """Wraps XLA's `Pad
  <https://www.tensorflow.org/xla/operation_semantics#pad>`_
  operator.

  Args:
    operand: the array to pad.
    padding_value: the scalar value to pad with.
    padding_config: one `(low, high, interior)` triple per dimension of
      `operand`, per XLA's Pad semantics.
  """
  return pad_p.bind(operand, padding_value, padding_config=tuple(padding_config))
def rev(operand: Array, dimensions: Sequence[int]) -> Array:
  """Wraps XLA's `Rev
  <https://www.tensorflow.org/xla/operation_semantics#rev_reverse>`_
  operator.

  Args:
    operand: the array to reverse.
    dimensions: the dimensions along which to reverse `operand`.
  """
  return rev_p.bind(operand, dimensions=tuple(dimensions))
def select(pred: Array, on_true: Array, on_false: Array) -> Array:
  """Wraps XLA's `Select
  <https://www.tensorflow.org/xla/operation_semantics#select>`_
  operator.

  Args:
    pred: predicate array; selects elementwise between the other operands.
    on_true: values taken where `pred` is true.
    on_false: values taken where `pred` is false.
  """
  return select_p.bind(pred, on_true, on_false)
def slice(operand: Array, start_indices: Sequence[int],
          limit_indices: Sequence[int],
          strides: Optional[Sequence[int]] = None) -> Array:
  """Wraps XLA's `Slice
  <https://www.tensorflow.org/xla/operation_semantics#slice>`_
  operator.

  Args:
    operand: the array to slice.
    start_indices: per-dimension start indices.
    limit_indices: per-dimension exclusive end indices.
    strides: optional per-dimension strides; `None` means unit strides.
  """
  # Fast path: a full, unstrided slice is the identity.
  if (onp.all(onp.equal(start_indices, 0))
      and onp.all(onp.equal(limit_indices, operand.shape))
      and strides is None):
    return operand
  else:
    return slice_p.bind(operand, start_indices=tuple(start_indices),
                        limit_indices=tuple(limit_indices),
                        strides=None if strides is None else tuple(strides))
def dynamic_slice(operand: Array, start_indices: Sequence[Array],
                  slice_sizes: Shape) -> Array:
  """Wraps XLA's `DynamicSlice
  <https://www.tensorflow.org/xla/operation_semantics#dynamicslice>`_
  operator.

  Args:
    operand: an array to slice.
    start_indices: a list of scalar indices, one per dimension.
    slice_sizes: the size of the slice. Must be a sequence of non-negative
      integers with length equal to `ndim(operand)`.

  Returns:
    An array containing the slice.
  """
  # Canonicalize the start indices (see _dynamic_slice_indices); presumably
  # this handles negative/out-of-range indices — behavior defined there.
  start_indices = _dynamic_slice_indices(operand, start_indices)
  return dynamic_slice_p.bind(operand, *start_indices,
                              slice_sizes=tuple(slice_sizes))
def dynamic_update_slice(operand: Array, update: Array,
                         start_indices: Array) -> Array:
  """Wraps XLA's `DynamicUpdateSlice
  <https://www.tensorflow.org/xla/operation_semantics#dynamicupdateslice>`_
  operator.

  Args:
    operand: an array to slice.
    update: an array containing the new values to write onto `operand`.
    start_indices: a list of scalar indices, one per dimension.

  Returns:
    An array containing the slice.
  """
  # Canonicalize the start indices the same way dynamic_slice does.
  start_indices = _dynamic_slice_indices(operand, start_indices)
  return dynamic_update_slice_p.bind(operand, update, *start_indices)
class GatherDimensionNumbers(NamedTuple):
  """
  Describes the dimension number arguments to an `XLA's Gather operator
  <https://www.tensorflow.org/xla/operation_semantics#gather>`_. See the XLA
  documentation for more details of what the dimension numbers mean.

  Args:
    offset_dims: the set of dimensions in the `gather` output that offset into
      an array sliced from `operand`. Must be a tuple of integers in ascending
      order, each representing a dimension number of the output.
    collapsed_slice_dims: the set of dimensions `i` in `operand` that have
      `slice_sizes[i] == 1` and that should not have a corresponding dimension
      in the output of the gather. Must be a tuple of integers in ascending
      order.
    start_index_map: for each dimension in `start_indices`, gives the
      corresponding dimension in `operand` that is to be sliced. Must be a
      tuple of integers with size equal to `start_indices.shape[-1]`.

  Unlike XLA's `GatherDimensionNumbers` structure, `index_vector_dim` is
  implicit; there is always an index vector dimension and it must always be the
  last dimension. To gather scalar indices, add a trailing dimension of size 1.
  """
  offset_dims: Sequence[int]           # output dims that index into each slice
  collapsed_slice_dims: Sequence[int]  # size-1 operand dims dropped from output
  start_index_map: Sequence[int]       # start_indices component -> operand dim
def gather(operand: Array, start_indices: Array,
           dimension_numbers: GatherDimensionNumbers,
           slice_sizes: Shape) -> Array:
  """Gather operator.

  Wraps `XLA's Gather operator
  <https://www.tensorflow.org/xla/operation_semantics#gather>`_.

  The semantics of gather are complicated, and its API might change in the
  future. For most use cases, you should prefer `Numpy-style indexing
  <https://docs.scipy.org/doc/numpy-1.16.0/reference/arrays.indexing.html>`_
  (e.g., `x[:, (1,4,7), ...]`), rather than using `gather` directly.

  Args:
    operand: an array from which slices should be taken
    start_indices: the indices at which slices should be taken
    dimension_numbers: a `lax.GatherDimensionNumbers` object that describes
      how dimensions of `operand`, `start_indices` and the output relate.
    slice_sizes: the size of each slice. Must be a sequence of non-negative
      integers with length equal to `ndim(operand)`.

  Returns:
    An array containing the gather output.
  """
  # slice_sizes is canonicalized to a tuple of ints before binding.
  return gather_p.bind(
      operand, start_indices, dimension_numbers=dimension_numbers,
      slice_sizes=canonicalize_shape(slice_sizes))
class ScatterDimensionNumbers(NamedTuple):
  """
  Describes the dimension number arguments to an `XLA's Scatter operator
  <https://www.tensorflow.org/xla/operation_semantics#scatter>`_. See the XLA
  documentation for more details of what the dimension numbers mean.

  Args:
    update_window_dims: the set of dimensions in the `updates` that are window
      dimensions. Must be a tuple of integers in ascending
      order, each representing a dimension number.
    inserted_window_dims: the set of size 1 window dimensions that must be inserted
      into the shape of `updates`. Must be a tuple of integers in ascending
      order, each representing a dimension number of the output. These are the
      mirror image of `collapsed_slice_dims` in the case of `gather`.
    scatter_dims_to_operand_dims: for each dimension in `scatter_indices`, gives
      the corresponding dimension in `operand`. Must be a sequence of integers
      with size equal to indices.shape[-1].

  Unlike XLA's `ScatterDimensionNumbers` structure, `index_vector_dim` is
  implicit; there is always an index vector dimension and it must always be the
  last dimension. To scatter scalar indices, add a trailing dimension of size 1.
  """
  update_window_dims: Sequence[int]             # window dims of `updates`
  inserted_window_dims: Sequence[int]           # size-1 window dims to insert
  scatter_dims_to_operand_dims: Sequence[int]   # index component -> operand dim
def scatter_add(operand: Array, scatter_indices: Array, updates: Array,
                dimension_numbers: ScatterDimensionNumbers) -> Array:
  """Scatter-add operator.

  Wraps `XLA's Scatter operator
  <https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
  addition is used to combine updates and values from `operand`.

  The semantics of scatter are complicated and its API is subject to change.

  Args:
    operand: an array to which the scatter should be applied
    scatter_indices: an array that gives the indices in `operand` to which each
      update in `updates` should be applied.
    updates: the updates that should be scattered onto `operand`.
    dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
      how dimensions of `operand`, `start_indices`, `updates` and the output
      relate.

  Returns:
    An array containing the sum of `operand` and the scattered updates.
  """
  # Trace the scalar `add` combiner into a jaxpr (cached by _reduction_jaxpr).
  jaxpr, consts = _reduction_jaxpr(add, _abstractify(_const(operand, 0)))
  return scatter_add_p.bind(
      operand, scatter_indices, updates, update_jaxpr=jaxpr,
      update_consts=consts, dimension_numbers=dimension_numbers)
def scatter_mul(operand: Array, scatter_indices: Array, updates: Array,
                dimension_numbers: ScatterDimensionNumbers) -> Array:
  """Scatter-multiply operator.

  Wraps `XLA's Scatter operator
  <https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
  multiplication is used to combine updates and values from `operand`.

  The semantics of scatter are complicated and its API is subject to change.

  Args:
    operand: an array to which the scatter should be applied
    scatter_indices: an array that gives the indices in `operand` to which each
      update in `updates` should be applied.
    updates: the updates that should be scattered onto `operand`.
    dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
      how dimensions of `operand`, `start_indices`, `updates` and the output
      relate.

  Returns:
    An array containing `operand` with the scattered updates multiplied in.
  """
  # Trace the scalar `mul` combiner into a jaxpr (cached by _reduction_jaxpr).
  jaxpr, consts = _reduction_jaxpr(mul, _abstractify(_const(operand, 1)))
  return scatter_mul_p.bind(
      operand, scatter_indices, updates, update_jaxpr=jaxpr,
      update_consts=consts, dimension_numbers=dimension_numbers)
def scatter_min(operand: Array, scatter_indices: Array, updates: Array,
                dimension_numbers: ScatterDimensionNumbers) -> Array:
  """Scatter-min operator.

  Wraps `XLA's Scatter operator
  <https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
  the `min` function is used to combine updates and values from `operand`.

  The semantics of scatter are complicated and its API is subject to change.

  Args:
    operand: an array to which the scatter should be applied
    scatter_indices: an array that gives the indices in `operand` to which each
      update in `updates` should be applied.
    updates: the updates that should be scattered onto `operand`.
    dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
      how dimensions of `operand`, `start_indices`, `updates` and the output
      relate.

  Returns:
    An array containing the elementwise `min` of `operand` and the scattered
    updates.
  """
  # NOTE(review): the constant here appears to be used only for its shape/dtype
  # via _abstractify (its value 0 is not the `min` identity) — confirm.
  jaxpr, consts = _reduction_jaxpr(min, _abstractify(_const(operand, 0)))
  return scatter_min_p.bind(
      operand, scatter_indices, updates, update_jaxpr=jaxpr,
      update_consts=consts, dimension_numbers=dimension_numbers)
def scatter_max(operand: Array, scatter_indices: Array, updates: Array,
                dimension_numbers: ScatterDimensionNumbers) -> Array:
  """Scatter-max operator.

  Wraps `XLA's Scatter operator
  <https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
  the `max` function is used to combine updates and values from `operand`.

  The semantics of scatter are complicated and its API is subject to change.

  Args:
    operand: an array to which the scatter should be applied
    scatter_indices: an array that gives the indices in `operand` to which each
      update in `updates` should be applied.
    updates: the updates that should be scattered onto `operand`.
    dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
      how dimensions of `operand`, `start_indices`, `updates` and the output
      relate.

  Returns:
    An array containing the elementwise `max` of `operand` and the scattered
    updates.
  """
  # NOTE(review): the constant here appears to be used only for its shape/dtype
  # via _abstractify — confirm.
  jaxpr, consts = _reduction_jaxpr(max, _abstractify(_const(operand, 0)))
  return scatter_max_p.bind(
      operand, scatter_indices, updates, update_jaxpr=jaxpr,
      update_consts=consts, dimension_numbers=dimension_numbers)
# Define this outside of scatter to ensure cache hits.
# Combiner for plain `scatter`: the update value replaces the operand value.
_scatter_reduction_computation = lambda x, y: y
def scatter(operand: Array, scatter_indices:Array, updates: Array,
            dimension_numbers: ScatterDimensionNumbers) -> Array:
  """Scatter-update operator.

  Wraps `XLA's Scatter operator
  <https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where updates
  replace values from `operand`.

  If multiple updates are performed to the same index of operand, they may be
  applied in any order.

  The semantics of scatter are complicated and its API is subject to change.

  Args:
    operand: an array to which the scatter should be applied
    scatter_indices: an array that gives the indices in `operand` to which each
      update in `updates` should be applied.
    updates: the updates that should be scattered onto `operand`.
    dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
      how dimensions of `operand`, `start_indices`, `updates` and the output
      relate.

  Returns:
    An array containing `operand` with the scattered updates applied (the
    updates replace the corresponding values).
  """
  # The combiner ignores the operand value and returns the update.
  jaxpr, consts = _reduction_jaxpr(_scatter_reduction_computation,
                                   _abstractify(_const(operand, 0)))
  return scatter_p.bind(
      operand, scatter_indices, updates, update_jaxpr=jaxpr,
      update_consts=consts, dimension_numbers=dimension_numbers)
def index_take(src: Array, idxs: Array, axes: Sequence[int]) -> Array:
  """Gathers elements of `src` at positions given by `idxs` along `axes`.

  Each entry of `idxs` is a 1-D index vector for one of `axes`; indices are
  taken modulo the corresponding axis size.
  """
  # Stack the per-axis index vectors into an (N, len(axes)) index array.
  columns = [reshape(ix, [ix.shape[0], 1]) for ix in idxs]
  indices = concatenate(columns, 1)
  indices = indices % onp.array([src.shape[ax] for ax in axes])
  # Gather a size-1 slice along each indexed axis, full extent elsewhere.
  sizes = [1 if ax in axes else dim for ax, dim in enumerate(src.shape)]
  offset_dims = tuple(range(1, src.ndim - indices.shape[1] + 1))
  dnums = GatherDimensionNumbers(
      offset_dims=offset_dims,
      collapsed_slice_dims=axes,
      start_index_map=axes)
  return gather(src, indices, dimension_numbers=dnums,
                slice_sizes=tuple(sizes))
def transpose(operand: Array, permutation: Sequence[int]) -> Array:
  """Wraps XLA's `Transpose
  <https://www.tensorflow.org/xla/operation_semantics#transpose>`_
  operator.
  """
  perm = tuple(permutation)
  # The identity permutation is a no-op.
  if perm == tuple(range(len(perm))):
    return operand
  return transpose_p.bind(operand, permutation=perm)
def reduce(operand: Array, init_value: Array, computation: Callable,
           dimensions: Sequence[int]) -> Array:
  """Wraps XLA's `Reduce
  <https://www.tensorflow.org/xla/operation_semantics#reduce>`_
  operator.

  Args:
    operand: the array to reduce.
    init_value: a scalar initial value for the reduction.
    computation: a binary combining function.
    dimensions: the dimensions to reduce over.
  """
  # Fast path: well-known monoid reductions (add/mul/min/max/and/or paired
  # with the matching identity init_value) use specialized primitives.
  monoid_reducer = _get_monoid_reducer(computation, init_value)
  if monoid_reducer:
    return monoid_reducer(operand, dimensions)
  else:
    jaxpr, consts = _reduction_jaxpr(computation, _abstractify(init_value))
    return reduce_p.bind(operand, init_value, computation=computation,
                         jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions))
@cache()
def _reduction_jaxpr(computation, aval):
  """Traces the binary `computation` into a jaxpr at abstract value `aval`.

  Cached (via @cache) so repeated reductions with the same combiner and
  abstract value share a single trace.
  """
  pval = pe.PartialVal.unknown(aval)
  comp = lu.wrap_init(lambda x, y: (computation(x, y),))
  jaxpr, _, consts = pe.trace_to_jaxpr(comp, (pval, pval), instantiate=False)
  return jaxpr, consts
def _get_monoid_reducer(monoid_op: Callable, x: Array) -> Optional[Callable]:
  """Returns a specialized reducer for `monoid_op` if `x` is its identity.

  The `aval.val == ident and _reducer` expressions evaluate to either the
  reducer function or `False`; callers (e.g. `reduce`) treat any falsy result
  as "no specialized reducer available".
  """
  aval = core.get_aval(x)
  dtype = _dtype(x)
  # Only concrete scalar init values can be matched against an identity.
  if (type(aval) is ConcreteArray) and aval.shape == ():
    if monoid_op is add:
      return aval.val == 0 and _reduce_sum
    if monoid_op is mul:
      return aval.val == 1 and _reduce_prod
    elif monoid_op is bitwise_or and dtype == onp.bool_:
      return aval.val == _get_max_identity(dtype) and _reduce_or
    elif monoid_op is bitwise_and and dtype == onp.bool_:
      return aval.val == _get_min_identity(dtype) and _reduce_and
    elif monoid_op is max:
      return aval.val == _get_max_identity(dtype) and _reduce_max
    elif monoid_op is min:
      return aval.val == _get_min_identity(dtype) and _reduce_min
  return None
def _get_max_identity(dtype: DType) -> Array:
  """Returns the identity element for `max` at `dtype` as a 0-d array.

  -inf for inexact (float/complex) dtypes, the integer minimum for integer
  dtypes, and False for booleans.

  Raises:
    ValueError: if `dtype` fits none of those categories. (Previously this
      fell through and silently returned None.)
  """
  if dtypes.issubdtype(dtype, onp.inexact):
    return onp.array(-onp.inf, dtype)
  elif dtypes.issubdtype(dtype, onp.integer):
    return onp.array(dtypes.iinfo(dtype).min, dtype)
  elif dtypes.issubdtype(dtype, onp.bool_):
    return onp.array(False, onp.bool_)
  else:
    raise ValueError(
        "Unsupported dtype for max identity: {}".format(dtype))
def _get_min_identity(dtype: DType) -> Array:
  """Returns the identity element for `min` at `dtype` as a 0-d array.

  +inf for inexact (float/complex) dtypes, the integer maximum for integer
  dtypes, and True for booleans.

  Raises:
    ValueError: if `dtype` fits none of those categories. (Previously this
      fell through and silently returned None.)
  """
  if dtypes.issubdtype(dtype, onp.inexact):
    return onp.array(onp.inf, dtype)
  elif dtypes.issubdtype(dtype, onp.integer):
    return onp.array(dtypes.iinfo(dtype).max, dtype)
  elif dtypes.issubdtype(dtype, onp.bool_):
    return onp.array(True, onp.bool_)
  else:
    raise ValueError(
        "Unsupported dtype for min identity: {}".format(dtype))
def _reduce_sum(operand: Array, axes: Sequence[int]) -> Array:
  """Sum-reduction over `axes` via the specialized reduce_sum primitive."""
  return reduce_sum_p.bind(operand, axes=tuple(axes))
def _reduce_prod(operand: Array, axes: Sequence[int]) -> Array:
  """Product-reduction over `axes` via the specialized reduce_prod primitive."""
  return reduce_prod_p.bind(operand, axes=tuple(axes))
def _reduce_max(operand: Array, axes: Sequence[int]) -> Array:
  """Max-reduction over `axes` via the specialized reduce_max primitive."""
  return reduce_max_p.bind(operand, axes=tuple(axes))
def _reduce_min(operand: Array, axes: Sequence[int]) -> Array:
  """Min-reduction over `axes` via the specialized reduce_min primitive."""
  return reduce_min_p.bind(operand, axes=tuple(axes))
def _reduce_or(operand: Array, axes: Sequence[int]) -> Array:
  """Logical-or reduction over `axes` via the specialized reduce_or primitive."""
  return reduce_or_p.bind(operand, axes=tuple(axes))
def _reduce_and(operand: Array, axes: Sequence[int]) -> Array:
  """Logical-and reduction over `axes` via the specialized reduce_and primitive."""
  return reduce_and_p.bind(operand, axes=tuple(axes))
def reduce_window(operand: Array, init_value: Array, computation: Callable,
                  window_dimensions: Shape, window_strides: Sequence[int],
                  padding: str) -> Array:
  """Wraps XLA's `ReduceWindow
  <https://www.tensorflow.org/xla/operation_semantics#reducewindow>`_
  operator.

  Args:
    operand: the array to reduce over windows.
    init_value: a scalar initial value for the reduction.
    computation: a binary combining function.
    window_dimensions: the window shape.
    window_strides: the inter-window strides.
    padding: a padding-type string.
  """
  # Fast path: known monoid ops with the matching identity init_value use
  # specialized window-reduction primitives.
  monoid_reducer = _get_monoid_window_reducer(computation, init_value)
  if monoid_reducer:
    return monoid_reducer(operand, window_dimensions, window_strides, padding)
  else:
    jaxpr, consts = _reduction_jaxpr(computation, _abstractify(init_value))
    return reduce_window_p.bind(
        operand, init_value, jaxpr=jaxpr, consts=consts,
        window_dimensions=tuple(window_dimensions),
        window_strides=tuple(window_strides), padding=padding)
def _get_monoid_window_reducer(monoid_op: Callable, x: Array) -> Optional[Callable]:
  """Returns a specialized window reducer for `monoid_op` if `x` is its identity.

  As in `_get_monoid_reducer`, the `== ident and _reducer` expressions yield
  either the reducer or `False`; callers treat falsy as "not available".
  """
  aval = core.get_aval(x)
  # Only concrete scalar init values can be matched against an identity.
  if (type(aval) is ConcreteArray) and aval.shape == ():
    if monoid_op is add:
      return aval.val == 0 and _reduce_window_sum
    elif monoid_op is max:
      return aval.val == _get_max_identity(aval.dtype) and _reduce_window_max
    elif monoid_op is min:
      return aval.val == _get_min_identity(aval.dtype) and _reduce_window_min
  return None
def _reduce_window_sum(operand: Array, window_dimensions: Shape,
                       window_strides: Sequence[int], padding: str) -> Array:
  """Windowed sum via the specialized reduce_window_sum primitive."""
  return reduce_window_sum_p.bind(
      operand, window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def _reduce_window_prod(operand: Array, window_dimensions: Shape,
                        window_strides: Sequence[int], padding: str) -> Array:
  """Windowed product via the generic reduce_window primitive with a `mul`
  combiner (there is no specialized product primitive here)."""
  init_value = _const(operand, 1)
  jaxpr, consts = _reduction_jaxpr(mul, _abstractify(init_value))
  return reduce_window_p.bind(
      operand, init_value, jaxpr=jaxpr, consts=consts,
      window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def _reduce_window_max(operand: Array, window_dimensions: Shape,
                       window_strides: Sequence[int], padding: str) -> Array:
  """Windowed max via the specialized reduce_window_max primitive."""
  return reduce_window_max_p.bind(
      operand, window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def _reduce_window_min(operand: Array, window_dimensions: Shape,
                       window_strides: Sequence[int], padding: str) -> Array:
  """Windowed min via the specialized reduce_window_min primitive."""
  return reduce_window_min_p.bind(
      operand, window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def _select_and_scatter(operand: Array, select: Callable,
                        window_dimensions: Shape, window_strides: Sequence[int],
                        padding: str, source: Array, init_value: Array,
                        scatter: Callable) -> Array:
  """Binds the select_and_scatter primitive: traces the `select` and `scatter`
  binary combiners into jaxprs and passes them alongside the window config."""
  select_jaxpr, select_consts = _reduction_jaxpr(select, _abstractify(init_value))
  scatter_jaxpr, scatter_consts = _reduction_jaxpr(scatter, _abstractify(init_value))
  return select_and_scatter_p.bind(
      operand, source, init_value, select_jaxpr=select_jaxpr,
      select_consts=select_consts, scatter_jaxpr=scatter_jaxpr,
      scatter_consts=scatter_consts, window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def _select_and_scatter_add(source: Array, operand: Array,
                            select_prim: core.Primitive,
                            window_dimensions: Shape,
                            window_strides: Sequence[int],
                            padding: str) -> Array:
  """Binds the select_and_scatter_add primitive, parameterized by the
  selection primitive `select_prim` and the window configuration."""
  return select_and_scatter_add_p.bind(
      source, operand, select_prim=select_prim,
      window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def _select_and_gather_add(tangents: Array, operand: Array,
                           select_prim: core.Primitive,
                           window_dimensions: Shape,
                           window_strides: Sequence[int],
                           padding: str) -> Array:
  """Binds the select_and_gather_add primitive, parameterized by the
  selection primitive `select_prim` and the window configuration."""
  return select_and_gather_add_p.bind(
      tangents, operand, select_prim=select_prim,
      window_dimensions=tuple(window_dimensions),
      window_strides=tuple(window_strides), padding=padding)
def cumsum(operand: Array, axis: int) -> Array:
  """Computes a cumulative sum along `axis`.

  Args:
    operand: the array to scan.
    axis: the axis along which to accumulate.
  """
  return cumsum_p.bind(operand, axis=int(axis))
def cumprod(operand: Array, axis: int) -> Array:
  """Computes a cumulative product along `axis`.

  Args:
    operand: the array to scan.
    axis: the axis along which to accumulate.
  """
  return cumprod_p.bind(operand, axis=int(axis))
def sort(operand: Union[Array, Tuple[Array, ...]], dimension: int = -1
         ) -> Union[Array, Tuple[Array, ...]]:
  """Wraps XLA's `Sort
  <https://www.tensorflow.org/xla/operation_semantics#sort>`_
  operator.

  Sorts a single array, or a tuple of arrays keyed on the first one.
  """
  # Single-array case: bind and unwrap the one-element result.
  if not isinstance(operand, tuple):
    axis = _canonicalize_axis(dimension, len(operand.shape))
    return sort_p.bind(operand, dimension=axis)[0]
  if not operand:
    raise TypeError("Sort requires at least one operand")
  axis = _canonicalize_axis(dimension, len(operand[0].shape))
  return tuple(sort_p.bind(*operand, dimension=axis))
def sort_key_val(keys: Array, values: Array,
                 dimension: int = -1) -> Tuple[Array, Array]:
  """Sorts ``keys`` along ``dimension`` and applies same permutation to ``values``."""
  axis = _canonicalize_axis(dimension, len(keys.shape))
  sorted_keys, sorted_values = sort_p.bind(keys, values, dimension=axis)
  return sorted_keys, sorted_values
def top_k(operand: Array, k: int) -> Tuple[Array, Array]:
  """Returns top ``k`` values and their indices along the last axis of ``operand``.

  Args:
    operand: the array to search.
    k: a nonnegative number of values to return per last-axis slice.

  Raises:
    ValueError: if `k` is negative.
  """
  k = int(k)
  if k < 0:
    raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
  return top_k_p.bind(operand, k=k)
def tie_in(x: Array, y: Array) -> Array:
  """Gives ``y`` a fake data dependence on ``x``.

  When staging to XLA (e.g. running under jit or pmap), values that don't depend
  on computation inputs are computed op-by-op, and folded into the XLA
  computation as constants.

  ``tie_in`` provides a way to explicitly stage values into the computation.
  When staging to XLA and ``x`` is already staged, then the result of ``tie_in``
  is ``y``, but staged to XLA. Downstream use of the result will also be staged
  to XLA.

  Args:
    x: the value to tie the dependence to.
    y: the value to return, staged with a dependence on ``x``.
  """
  return tie_in_p.bind(x, y)
def full(shape: Shape, fill_value: Array, dtype: Optional[DType] = None) -> Array:
  """Returns an array of `shape` filled with `fill_value`.

  Arguments:
    shape: sequence of integers, describing the shape of the output array.
    fill_value: the value to fill the new array with; must be a scalar.
    dtype: the type of the output array, or `None`. If not `None`, `fill_value`
      will be cast to `dtype`.
  """
  shape = canonicalize_shape(shape)
  fill_value_shape = onp.shape(fill_value)
  if fill_value_shape:
    msg = "full must be called with scalar fill_value, got fill_value.shape {}."
    raise TypeError(msg.format(fill_value_shape))
  dtype = dtypes.canonicalize_dtype(dtype or _dtype(fill_value))
  # TODO(mattjj): remove device_put when dtype conversion produces DeviceArray
  fill_value = xla.device_put_p.bind(convert_element_type(fill_value, dtype))
  return broadcast(fill_value, shape)
def iota(dtype: DType, size: int) -> Array:
  """Wraps XLA's `Iota
  <https://www.tensorflow.org/xla/operation_semantics#iota>`_
  operator.
  """
  # `size` may be a symbolic masking.Poly; only coerce concrete sizes to int.
  size = size if type(size) is masking.Poly else int(size)
  shape = canonicalize_shape((size,))
  dtype = dtypes.canonicalize_dtype(dtype)
  # Build a lazy expression: no device buffer is materialized here.
  lazy_expr = lazy.iota(dtype, shape[0])
  aval = ShapedArray(shape, dtype)
  return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def broadcasted_iota(dtype: DType, shape: Shape, dimension: int) -> Array:
  """Convenience wrapper around ``iota``: an iota along one dim of `shape`."""
  dtype = dtypes.canonicalize_dtype(dtype)
  shape = canonicalize_shape(shape)
  dimension = int(dimension)
  one_dimensional = iota(dtype, shape[dimension])
  return broadcast_in_dim(one_dimensional, shape, [dimension])
def _eye(dtype: DType, shape: Shape, offset: int) -> Array:
  """Like numpy.eye, create a 2D array with ones on a diagonal.

  This function exists for creating lazy identity matrices; that is,
  materialization of the array is delayed and it may be fused into consumers
  to avoid materialization at all."""
  n_rows, n_cols = (int(d) for d in shape)
  offset = int(offset)
  dtype = dtypes.canonicalize_dtype(dtype)
  aval = ShapedArray((n_rows, n_cols), dtype)
  lazy_expr = lazy.eye(dtype, (n_rows, n_cols), offset)
  return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def _delta(dtype: DType, shape: Shape, axes: Sequence[int]) -> Array:
  """Create a lazy Kronecker-delta array, mainly for jax.numpy.einsum traces.

  Unlike ``_eye`` it supports any rank, but it does not allow offsets."""
  shape = tuple(int(d) for d in shape)
  axes = tuple(int(a) for a in axes)
  dtype = dtypes.canonicalize_dtype(dtype)
  # The delta lives on the axes listed in `axes`; broadcast it to full shape.
  base_shape = tuple(onp.take(shape, axes))
  lazy_expr = lazy.broadcast(lazy.delta(dtype, base_shape), shape, axes)
  aval = ShapedArray(shape, dtype)
  return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def _tri(dtype: DType, shape: Shape, offset: int) -> Array:
  """Like numpy.tri, create a 2D array with ones below a diagonal.

  Exists for creating lazy triangular matrices, particularly for
  use in jax.numpy.tri."""
  n_rows, n_cols = (int(d) for d in shape)
  offset = int(offset)
  dtype = dtypes.canonicalize_dtype(dtype)
  aval = ShapedArray((n_rows, n_cols), dtype)
  lazy_expr = lazy.tri(dtype, (n_rows, n_cols), offset)
  return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def stop_gradient(x):
  """Stops gradient computation.

  Operationally `stop_gradient` is the identity function, that is, it returns
  argument `x` unchanged. However, `stop_gradient` prevents the flow of
  gradients during forward or reverse-mode automatic differentiation. If there
  are multiple nested gradient computations, `stop_gradient` stops gradients
  for all of them.

  For example:

  >>> jax.grad(lambda x: x**2)(3.)
  array(6., dtype=float32)
  >>> jax.grad(lambda x: jax.lax.stop_gradient(x)**2)(3.)
  array(0., dtype=float32)
  >>> jax.grad(jax.grad(lambda x: x**2))(3.)
  array(2., dtype=float32)
  >>> jax.grad(jax.grad(lambda x: jax.lax.stop_gradient(x)**2))(3.)
  array(0., dtype=float32)
  """
  # Apply the stop_gradient primitive leaf-wise so pytree arguments work.
  return tree_map(ad_util.stop_gradient_p.bind, x)
### convenience wrappers around traceables
def conv(lhs: Array, rhs: Array, window_strides: Sequence[int],
         padding: str, precision: Optional[PrecisionType] = None) -> Array:
  """Convenience wrapper around `conv_general_dilated`.

  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    window_strides: a sequence of `n` integers, representing the inter-window
      strides.
    padding: either the string `'SAME'`, the string `'VALID'`.
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.

  Returns:
    An array containing the convolution result.
  """
  # NOTE(review): a previous version also computed explicit pads via
  # `padtype_to_pads` but never used the result; `conv_general_dilated`
  # accepts the 'SAME'/'VALID' string directly, so the dead computation
  # has been removed.
  return conv_general_dilated(lhs, rhs, window_strides, padding,
                              precision=precision)
def conv_with_general_padding(lhs: Array, rhs: Array,
                              window_strides: Sequence[int],
                              padding: Union[str, Sequence[Tuple[int, int]]],
                              lhs_dilation: Optional[Sequence[int]],
                              rhs_dilation: Optional[Sequence[int]],
                              precision: Optional[PrecisionType] = None) -> Array:
  """Convenience wrapper around `conv_general_dilated`.

  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    window_strides: a sequence of `n` integers, representing the inter-window
      strides.
    padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
      `n` `(low, high)` integer pairs that give the padding to apply before and
      after each spatial dimension.
    lhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
      is also known as transposed convolution.
    rhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
      is also known as atrous convolution.
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.

  Returns:
    An array containing the convolution result.
  """
  # Pure delegation: only the dilation arguments distinguish this from `conv`.
  return conv_general_dilated(lhs, rhs, window_strides, padding,
                              lhs_dilation=lhs_dilation,
                              rhs_dilation=rhs_dilation,
                              precision=precision)
def _conv_transpose_padding(k, s, padding):
"""Calculate before and after padding for a dim of transposed convolution.
Args:
k: int: kernel dimension.
s: int: dimension stride value.
padding: 'same' or 'valid' padding mode for original forward conv.
Returns:
2-tuple: ints: before and after padding for transposed convolution.
"""
if padding == 'SAME':
pad_len = k + s - 2
if s > k - 1:
pad_a = k - 1
else:
pad_a = int(onp.ceil(pad_len / 2))
elif padding == 'VALID':
pad_len = k + s - 2 + _max(k - s, 0)
pad_a = k - 1
else:
raise ValueError('Padding mode must be `SAME` or `VALID`.')
pad_b = pad_len - pad_a
return pad_a, pad_b
def _flip_axes(x, axes):
"""Flip ndarray 'x' along each axis specified in axes tuple."""
for axis in axes:
x = onp.flip(x, axis)
return x
def conv_transpose(lhs: Array, rhs: Array, strides: Sequence[int],
                   padding: Union[str, Sequence[Tuple[int, int]]],
                   rhs_dilation: Optional[Sequence[int]] = None,
                   dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
                   transpose_kernel: bool = False,
                   precision: Optional[PrecisionType] = None) -> Array:
  """Convenience wrapper for calculating the N-d convolution "transpose".

  This function directly calculates a fractionally strided conv rather than
  indirectly calculating the gradient (transpose) of a forward convolution.

  Args:
    lhs: a rank `n+2` dimensional input array.
    rhs: a rank `n+2` dimensional array of kernel weights.
    strides: sequence of `n` integers, sets fractional stride.
    padding: 'SAME', 'VALID' will set as transpose of corresponding forward
      conv, or a sequence of `n` integer 2-tuples describing before-and-after
      padding for each `n` spatial dimension.
    rhs_dilation: `None`, or a sequence of `n` integers, giving the
      dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
      is also known as atrous convolution.
    dimension_numbers: tuple of dimension descriptors as in
      lax.conv_general_dilated. Defaults to tensorflow convention.
    transpose_kernel: if True flips spatial axes and swaps the input/output
      channel axes of the kernel. This makes the output of this function identical
      to the gradient-derived functions like keras.layers.Conv2DTranspose
      applied to the same kernel. For typical use in neural nets this is completely
      pointless and just makes input/output channel specification confusing.
    precision: Optional. Either `None`, which means the default precision for
      the backend, or a `Precision` enum value.

  Returns:
    Transposed N-d convolution, with output padding following the conventions of
    keras.layers.Conv2DTranspose.
  """
  assert len(lhs.shape) == len(rhs.shape) and len(lhs.shape) > 2
  ndims = len(lhs.shape)
  one = (1,) * (ndims - 2)
  # Set dimensional layout defaults if not specified.
  if dimension_numbers is None:
    if ndims == 3:
      dimension_numbers = ('NHC', 'HIO', 'NHC')
    elif ndims == 4:
      dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
    elif ndims == 5:
      dimension_numbers = ('NHWDC', 'HWDIO', 'NHWDC')
    else:
      raise ValueError('No 4+ dimensional dimension_number defaults.')
  dn = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
  # Spatial kernel sizes, in the order given by the rhs dimension spec.
  k_shape = onp.take(rhs.shape, dn.rhs_spec)
  k_sdims = k_shape[2:]
  # Calculate correct output shape given padding and strides.
  pads: Union[str, Sequence[Tuple[int, int]]]
  if padding in {'SAME', 'VALID'}:
    if rhs_dilation is None:
      rhs_dilation = (1,) * (rhs.ndim - 2)
    # Effective kernel size after RHS (atrous) dilation.
    effective_k_size = map(lambda k, r: (k-1) * r + 1, k_sdims, rhs_dilation)
    pads = [_conv_transpose_padding(k, s, padding)
            for k,s in zip(effective_k_size, strides)]
  else:
    pads = padding
  if transpose_kernel:
    # flip spatial dims and swap input / output channel axes
    rhs = _flip_axes(rhs, onp.array(dn.rhs_spec)[2:])
    rhs = onp.swapaxes(rhs, dn.rhs_spec[0], dn.rhs_spec[1])
  # Fractional striding is expressed as LHS dilation by `strides`.
  return conv_general_dilated(lhs, rhs, one, pads, strides, rhs_dilation, dn,
                              precision=precision)
def full_like(x: Array, fill_value: Array, dtype: Optional[DType] = None,
              shape: Optional[Shape] = None) -> Array:
  """Create a full array like np.full based on the example array `x`.

  Args:
    x: example array-like, used for shape and dtype information.
    fill_value: a scalar value to fill the entries of the output array.
    dtype: optional, a dtype parameter for the output ndarray.
    shape: optional, a shape parameter for the output ndarray.

  Returns:
    An ndarray with the same shape as `x` with its entries set equal to
    `fill_value`, similar to the output of np.full.
  """
  if shape is None:
    fill_shape = onp.shape(x)
  else:
    fill_shape = canonicalize_shape(shape)
  # tie_in keeps the constant staged with `x`'s computation.
  return full(fill_shape, tie_in(x, fill_value), dtype or _dtype(x))
def collapse(operand: Array, start_dimension: int, stop_dimension: int) -> Array:
  """Collapses dimensions [start_dimension, stop_dimension) into a single one."""
  lo, hi = start_dimension, stop_dimension
  collapsed_size = prod(operand.shape[lo:hi])
  return reshape(operand,
                 operand.shape[:lo] + (collapsed_size,) + operand.shape[hi:])
def slice_in_dim(operand: Array, start_index: Optional[int],
                 limit_index: Optional[int],
                 stride: int = 1, axis: int = 0)-> Array:
  """Convenience wrapper around slice applying to only one dimension."""
  axis = int(axis)
  axis_size = operand.shape[axis]
  # Translate `None` endpoints into concrete bounds.
  lo = 0 if start_index is None else int(start_index)
  hi = axis_size if limit_index is None else int(limit_index)
  # Translate negative indices relative to the axis length.
  if lo < 0:
    lo += axis_size
  if hi < 0:
    hi += axis_size
  start_indices = [0] * operand.ndim
  limit_indices = list(operand.shape)
  strides = [1] * operand.ndim
  start_indices[axis] = lo
  limit_indices[axis] = hi
  strides[axis] = int(stride)
  return slice(operand, start_indices, limit_indices, strides)
def index_in_dim(operand: Array, index: int, axis: int = 0,
                 keepdims: bool = True) -> Array:
  """Convenience wrapper around slice to perform int indexing."""
  index, axis = int(index), int(axis)
  axis_size = operand.shape[axis]
  # Support negative indices, numpy-style.
  wrapped = index + axis_size if index < 0 else index
  if wrapped < 0 or wrapped >= axis_size:
    msg = 'index {} is out of bounds for axis {} with size {}'
    raise IndexError(msg.format(index, axis, axis_size))
  sliced = slice_in_dim(operand, wrapped, wrapped + 1, 1, axis)
  if not keepdims:
    return reshape(sliced, onp.delete(operand.shape, axis))
  return sliced
def dynamic_slice_in_dim(operand: Array, start_index: Array,
                         slice_size: int, axis: int = 0) -> Array:
  """Convenience wrapper around dynamic_slice applying to one dimension."""
  axis = int(axis)
  # All other dimensions start at zero and keep their full size.
  start_indices = [_zero(start_index)] * operand.ndim
  start_indices[axis] = start_index
  slice_sizes = list(operand.shape)
  slice_sizes[axis] = int(slice_size)
  return dynamic_slice(operand, start_indices, slice_sizes)
def dynamic_index_in_dim(operand: Array, index: Array, axis: int = 0,
                         keepdims: bool = True) -> Array:
  """Convenience wrapper around dynamic_slice to perform int indexing."""
  sliced = dynamic_slice_in_dim(operand, index, 1, axis)
  if not keepdims:
    # Drop the now size-1 axis.
    return reshape(sliced, onp.delete(operand.shape, axis))
  return sliced
def dynamic_update_slice_in_dim(operand: Array, update: Array,
                                start_index: Array, axis: int) -> Array:
  """Convenience wrapper around dynamic_update_slice for one dimension."""
  axis = int(axis)
  # Zero offsets everywhere except the updated axis.
  start_indices = [_zero(start_index)] * _ndim(operand)
  start_indices[axis] = start_index
  return dynamic_update_slice(operand, update, start_indices)
def dynamic_update_index_in_dim(operand: Array, update: Array, index: Array,
                                axis: int) -> Array:
  """Convenience wrapper to update a single index along `axis`."""
  axis = int(axis)
  if _ndim(update) != _ndim(operand):
    # Allow an update of rank ndim-1 by inserting a unit axis at `axis`.
    assert _ndim(update) + 1 == _ndim(operand)
    ax = axis % _ndim(operand)
    expanded_shape = operand.shape[:ax] + (1,) + operand.shape[ax+1:]
    update = reshape(update, expanded_shape)
  return dynamic_update_slice_in_dim(operand, update, index, axis)
def batch_matmul(lhs: Array, rhs: Array,
                 precision: Optional[PrecisionType] = None) -> Array:
  """Batch matrix multiplication."""
  if _min(lhs.ndim, rhs.ndim) < 2:
    raise ValueError('Arguments to batch_matmul must be at least 2D, got {}, {}'
                     .format(lhs.ndim, rhs.ndim))
  if lhs.ndim != rhs.ndim:
    raise ValueError('Arguments to batch_matmul must have same ndim, got {}, {}'
                     .format(lhs.ndim, rhs.ndim))
  # Contract the last axis of lhs with the second-to-last axis of rhs,
  # batching over all leading axes.
  batch_dims = tuple(range(lhs.ndim - 2))
  dimension_numbers = (((lhs.ndim - 1,), (rhs.ndim - 2,)),
                       (batch_dims, batch_dims))
  return dot_general(lhs, rhs, dimension_numbers, precision=precision)
# These functions also exist in the XLA client library, but we treat them
# as non-primitive to maintain a smaller set of autodiff primitives.

def square(x: Array) -> Array:
  r"""Elementwise square: :math:`x^2`."""
  return integer_pow(x, 2)

def reciprocal(x: Array) -> Array:
  r"""Elementwise reciprocal: :math:`1 \over x`."""
  # Expressed via integer_pow so autodiff needs no dedicated reciprocal rule.
  return integer_pow(x, -1)
def _upcast_fp16_for_computation(f):
  """Decorator: evaluate `f` in float32 for half-precision inputs.

  float16/bfloat16 inputs are converted to float32, passed through `f`, and
  the result is converted back to the original dtype; other dtypes pass
  straight through.
  """
  @functools.wraps(f)
  def f_wrapped(x):
    dtype = _dtype(x)
    if dtype in (onp.float16, dtypes.bfloat16):
      return convert_element_type(
          f(convert_element_type(x, onp.float32)), dtype)
    return f(x)

  return f_wrapped
@api.jit
@_upcast_fp16_for_computation
def tan(x: Array) -> Array:
  r"""Elementwise tangent: :math:`\mathrm{tan}(x)`."""
  # Not an XLA primitive: computed as sin/cos (fp16 is upcast for accuracy).
  return div(sin(x), cos(x))
@api.jit
def asin(x: Array) -> Array:
  r"""Elementwise arc sine: :math:`\mathrm{asin}(x)`."""
  # asin(x) = 2 * atan2(x, 1 + sqrt(1 - x**2))
  one = _const(x, 1)
  denominator = add(one, sqrt(sub(one, square(x))))
  return mul(_const(x, 2), atan2(x, denominator))
@api.jit
def acos(x: Array) -> Array:
  r"""Elementwise arc cosine: :math:`\mathrm{acos}(x)`."""
  # acos(x) = 2 * atan2(sqrt(1 - x**2), 1 + x); x == -1 is handled
  # separately via select, returning pi there.
  one = _const(x, 1)
  main_branch = mul(_const(x, 2),
                    atan2(sqrt(sub(one, square(x))), add(one, x)))
  return select(ne(x, _const(x, -1.0)), main_branch, full_like(x, onp.pi))
def atan(x: Array) -> Array:
  r"""Elementwise arc tangent: :math:`\mathrm{atan}(x)`."""
  # Implemented via the atan2 primitive with a constant denominator of 1.
  return atan2(x, _const(x, 1))

# The hyperbolic functions below are thin wrappers over their primitives,
# defined later in this file.

def sinh(x: Array) -> Array:
  r"""Elementwise hyperbolic sine: :math:`\mathrm{sinh}(x)`."""
  return sinh_p.bind(x)

def cosh(x: Array) -> Array:
  r"""Elementwise hyperbolic cosine: :math:`\mathrm{cosh}(x)`."""
  return cosh_p.bind(x)

def asinh(x: Array) -> Array:
  r"""Elementwise inverse hyperbolic sine: :math:`\mathrm{asinh}(x)`."""
  return asinh_p.bind(x)

def acosh(x: Array) -> Array:
  r"""Elementwise inverse hyperbolic cosine: :math:`\mathrm{acosh}(x)`."""
  return acosh_p.bind(x)

def atanh(x: Array) -> Array:
  r"""Elementwise inverse hyperbolic tangent: :math:`\mathrm{atanh}(x)`."""
  return atanh_p.bind(x)
# Add some methods to ShapedArray that rely on lax primitives

ShapedArray.broadcast = core.aval_method(broadcast)
ShapedArray.transpose = core.aval_method(transpose)  # clobbered by lax_numpy
ShapedArray.reshape = core.aval_method(reshape)  # clobbered by lax_numpy

def _iter(tracer):
  # Iterating a traced array yields each subarray along the leading axis.
  if tracer.ndim == 0:
    raise TypeError("iteration over a 0-d array")  # same as numpy error
  else:
    n = tracer.shape[0]
    # return (index_in_dim(tracer, i, keepdims=False) for i in range(n))
    return iter([index_in_dim(tracer, i, keepdims=False) for i in range(n)])
ShapedArray._iter = staticmethod(_iter)

# Add some ad handlers that use (or could use) lax primitives

def zeros_like_array(x):
  # Zero value with the same shape and dtype as `x`, used for AD zeros.
  return full_like(x, 0)

for t in itertools.chain(dtypes.python_scalar_dtypes.keys(), array_types,
                         [xla.DeviceArray, pxla.ShardedDeviceArray]):
  ad_util.jaxval_adders[t] = add
ad_util.jaxval_zeros_likers[xla.DeviceArray] = zeros_like_array
ad_util.jaxval_zeros_likers[pxla.ShardedDeviceArray] = zeros_like_array

### primitives

# Shared dtype-rule helpers for the primitive constructors below.
_input_dtype = lambda *args, **_: dtypes.canonicalize_dtype(args[0].dtype)
_fixed_dtype = lambda dtype: lambda *args, **kwargs: dtypes.canonicalize_dtype(dtype)
_complex_basetype = lambda dtype: onp.abs(onp.zeros((), dtype)).dtype

def standard_primitive(shape_rule, dtype_rule, name, translation_rule=None):
  # Construct a Primitive with impl, abstract eval, and XLA translation wired
  # up from a shape rule and a dtype rule.
  prim = Primitive(name)
  prim.def_impl(partial(xla.apply_primitive, prim))
  prim.def_abstract_eval(partial(standard_abstract_eval, prim, shape_rule, dtype_rule))
  xla.translations[prim] = translation_rule or partial(standard_translate, name)
  return prim
def standard_abstract_eval(prim, shape_rule, dtype_rule, *args, **kwargs):
  """Shared abstract evaluation: evaluate `prim` at the least specialized
  abstraction level present among `args`."""
  assert all(isinstance(arg, UnshapedArray) for arg in args), args
  least_specialized = _max(map(type, args),
                           key=operator.attrgetter('array_abstraction_level'))
  if least_specialized is ConcreteArray:
    # All inputs concrete: just run the primitive's implementation.
    return ConcreteArray(prim.impl(*[x.val for x in args], **kwargs))
  if least_specialized is ShapedArray:
    return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
  if least_specialized is UnshapedArray:
    return UnshapedArray(dtype_rule(*args, **kwargs))
  raise TypeError(args, least_specialized)
def standard_translate(name, c, *args, **kwargs):
  """Translate by dispatching to the XLA op whose CamelCase name matches `name`."""
  xla_opname = ''.join([term.capitalize() for term in name.split('_')])
  return getattr(xops, xla_opname)(*args, **kwargs)
def unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, **kwargs):
  """Dtype rule for unary ops: validate the operand dtype against
  `accepted_dtypes`, then map it through `result_dtype`."""
  if any(dtypes.issubdtype(aval.dtype, t) for t in accepted_dtypes):
    return result_dtype(aval.dtype)
  msg = '{} does not accept dtype {}. Accepted dtypes are subtypes of {}.'
  typename = str(onp.dtype(aval.dtype).name)
  accepted_typenames = (t.__name__ for t in accepted_dtypes)
  raise TypeError(msg.format(name, typename, ', '.join(accepted_typenames)))
def unop(result_dtype, accepted_dtypes, name, translation_rule=None):
  # Create a unary elementwise primitive named `name` with vectorized
  # batching and masking rules.
  dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name)
  prim = standard_primitive(_attrgetter('shape'), dtype_rule, name,
                            translation_rule=translation_rule)
  batching.defvectorized(prim)
  masking.defvectorized(prim)
  return prim
standard_unop = partial(unop, _identity)
# NOTE: `_attrgetter` is referenced inside `unop` above but defined below;
# this works because the name is only looked up when `unop` is first called.
_attrgetter = lambda name: lambda x, **kwargs: getattr(x, name)

def naryop_dtype_rule(result_dtype, accepted_dtypes, name, *avals, **kwargs):
  # Validate each operand dtype against its accepted set, require that all
  # operand dtypes agree, then compute the result dtype.
  aval_dtypes = [aval.dtype for aval in avals]
  for i, (aval_dtype, types) in enumerate(zip(aval_dtypes, accepted_dtypes)):
    if not any(dtypes.issubdtype(aval_dtype, t) for t in types):
      msg = ('{} does not accept dtype {} at position {}. '
             'Accepted dtypes at position {} are subtypes of {}.')
      typename = str(onp.dtype(aval_dtype).name)
      typenames = ', '.join(t.__name__ for t in types)
      raise TypeError(msg.format(name, typename, i, i, typenames))
  _check_same_dtypes(name, False, *aval_dtypes)
  return result_dtype(*avals)
def _broadcasting_shape_rule(name, *avals):
shapes = onp.array([aval.shape for aval in avals if aval.shape])
if not shapes.size:
return ()
if len({len(shape) for shape in shapes}) != 1:
msg = '{} got arrays of different rank: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
is_zero = onp.any(shapes == 0, axis=0)
max_shape = onp.max(shapes, axis=0)
result_shape = onp.where(is_zero, 0, max_shape)
if not onp.all((shapes == result_shape) | (shapes == 1)):
msg = '{} got incompatible shapes for broadcasting: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
return tuple(result_shape)
def naryop(result_dtype, accepted_dtypes, name, translation_rule=None):
  # Create an n-ary broadcasting primitive named `name`.
  dtype_rule = partial(naryop_dtype_rule, result_dtype, accepted_dtypes, name)
  shape_rule = partial(_broadcasting_shape_rule, name)
  prim = standard_primitive(shape_rule, dtype_rule, name,
                            translation_rule=translation_rule)
  batching.defbroadcasting(prim)
  masking.defnaryop(prim)
  return prim
standard_naryop = partial(naryop, _input_dtype)

def _broadcast_translate(translate: Callable):
  # Decorator for translation rules which adds explicit broadcasting of
  # positional arguments. This is necessary only for a handful of primitives
  # whose XLA implementations do not support broadcasting.
  def _broadcast_array(array, array_shape, result_shape):
    if array_shape == result_shape:
      return array
    bcast_dims = tuple(range(len(result_shape) - len(array_shape),
                             len(result_shape)))
    result = xops.BroadcastInDim(array, result_shape, bcast_dims)
    return result

  def _broadcasted_translation_rule(c, *args, **kwargs):
    shapes = [c.get_shape(arg).dimensions() for arg in args]
    result_shape = broadcast_shapes(*shapes)
    args = [_broadcast_array(arg, arg_shape, result_shape)
            for arg, arg_shape in zip(args, shapes)]
    return translate(c, *args, **kwargs)
  return _broadcasted_translation_rule

# NOTE(mattjj): this isn't great for orchestrate fwd mode because it means JVPs
# get two extra ops in them: a reshape and a broadcast_in_dim (or sometimes just
# a broadcast). but saving the shape info with the primitives isn't great either
# because then we can't trace these ops without shape data.
def _brcast(x, *others):
  # Used in jvprules to make naryop broadcasting explicit for transposability.
  # Requires shape info during jvp tracing, which isn't strictly necessary.
  # We don't need full numpy broadcasting, but otherwise the logic is the same
  # so we reuse the broadcast_shapes function after filtering out scalars.
  shapes = tuple(filter(None, map(onp.shape, (x,) + others)))
  shape = shapes and broadcast_shapes(*shapes)
  if onp.shape(x) != shape:
    return _brcast_to(x, shape)
  else:
    return x

def _brcast_to(x, shape):
  # Broadcast `x` to `shape`, squeezing mismatched (size-1) dims first.
  x_shape = onp.shape(x)
  assert x_shape != shape
  if x_shape:
    assert len(x_shape) == len(shape)
    broadcast_dimensions, = onp.where(onp.equal(x_shape, shape))
    squeezed_dimensions, = onp.where(onp.not_equal(x_shape, shape))
    inshape = onp.delete(x_shape, squeezed_dimensions)
    return broadcast_in_dim(reshape(x, inshape), shape, broadcast_dimensions)
  else:
    return broadcast(x, shape)

# Dtype families used below to declare which input dtypes a primitive accepts.
_float = {onp.floating}
_complex = {onp.complexfloating}
_complex_elem_types = {onp.float32, onp.float64}
_int = {onp.integer}
_bool = {onp.bool_}

_num = _int | _float | _complex
_any = _int | _float | _complex | _bool
_bool_or_int = _int | _bool
# Elementwise primitive registrations: each primitive is created via
# standard_unop/standard_naryop and then given its autodiff (JVP) rule.

neg_p = standard_unop(_num, 'neg')
ad.deflinear(neg_p, lambda t: [neg(t)])

def _sign_translation_rule(c, x):
  # XLA's Sign is not defined for unsigned integers, so emit an explicit
  # select between 0 and 1 for those dtypes.
  shape = c.get_shape(x)
  dtype = shape.numpy_dtype()
  if dtypes.issubdtype(dtype, onp.unsignedinteger):
    zero = xb.constant(c, onp.array(0, dtype=dtype))
    dims = c.get_shape(x).dimensions()
    return xops.Select(xops.Eq(x, zero), xops.Broadcast(zero, dims),
                       xops.Broadcast(xb.constant(c, onp.array(1, dtype=dtype)),
                                      dims))
  return xops.Sign(x)

sign_p = standard_unop(_num, 'sign', translation_rule=_sign_translation_rule)
ad.defjvp_zero(sign_p)

nextafter_p = standard_naryop(
    [_float, _float], 'nextafter',
    translation_rule=lambda c, x1, x2: xops.NextAfter(x1, x2))

# Rounding ops have zero derivative almost everywhere.
floor_p = standard_unop(_float, 'floor')
ad.defjvp_zero(floor_p)

ceil_p = standard_unop(_float, 'ceil')
ad.defjvp_zero(ceil_p)

round_p = standard_unop(_float, 'round')
ad.defjvp_zero(round_p)

is_finite_p = unop(_fixed_dtype(onp.bool_), _float, 'is_finite')
ad.defjvp_zero(is_finite_p)

exp_p = standard_unop(_float | _complex, 'exp')
ad.defjvp2(exp_p, lambda g, ans, x: mul(g, ans))

log_p = standard_unop(_float | _complex, 'log')
ad.defjvp(log_p, lambda g, x: div(g, x))

expm1_p = standard_unop(_float | _complex, 'expm1')
ad.defjvp2(expm1_p, lambda g, ans, x: mul(g, add(ans, _one(ans))))

log1p_p = standard_unop(_float | _complex, 'log1p')
ad.defjvp(log1p_p, lambda g, x: div(g, add(x, _one(x))))

tanh_p = standard_unop(_float | _complex, 'tanh')
ad.defjvp2(tanh_p, lambda g, ans, x: mul(g, sub(_one(x), mul(ans, ans))))

sin_p = standard_unop(_float | _complex, 'sin')
ad.defjvp(sin_p, lambda g, x: mul(g, cos(x)))

cos_p = standard_unop(_float | _complex, 'cos')
ad.defjvp(cos_p, lambda g, x: neg(mul(g, sin(x))))

atan2_p = standard_naryop([_float, _float], 'atan2')
ad.defjvp(atan2_p,
          lambda g, x, y: _brcast(g, y) * (y / (square(x) + square(y))),
          lambda g, x, y: _brcast(g, x) * -x / (square(x) + square(y)))

sinh_p = standard_unop(_float | _complex, 'sinh')
ad.defjvp(sinh_p, lambda g, x: mul(g, cosh(x)))

cosh_p = standard_unop(_float | _complex, 'cosh')
ad.defjvp(cosh_p, lambda g, x: mul(g, sinh(x)))

asinh_p = standard_unop(_float | _complex, 'asinh')
ad.defjvp(asinh_p, lambda g, x: mul(g, rsqrt(square(x) + _one(x))))

acosh_p = standard_unop(_float | _complex, 'acosh')
ad.defjvp(acosh_p,
          lambda g, x: mul(g, rsqrt((x - _one(x)) * (x + _one(x)))))

atanh_p = standard_unop(_float | _complex, 'atanh')
ad.defjvp(atanh_p,
          lambda g, x: mul(g, reciprocal((_one(x) - x) * (_one(x) + x))))
# Special functions (beta/gamma/bessel/erf families). These XLA ops do not
# broadcast, hence the _broadcast_translate wrappers.

regularized_incomplete_beta_p = standard_naryop(
    [_float, _float, _float], 'regularized_incomplete_beta',
    translation_rule=_broadcast_translate(
        partial(standard_translate, 'regularized_incomplete_beta')))

def betainc_gradx(g, a, b, x):
  # d/dx betainc(a, b, x) = x**(a-1) * (1-x)**(b-1) / B(a, b), in log space.
  lbeta = lgamma(a) + lgamma(b) - lgamma(a + b)
  partial_x = exp((b - 1) * log1p(-x) +
                  (a - 1) * log(x) - lbeta)
  return partial_x * g

def betainc_grad_not_implemented(g, a, b, x):
  raise ValueError("Betainc gradient with respect to a and b not supported.")

ad.defjvp(regularized_incomplete_beta_p,
          betainc_grad_not_implemented,
          betainc_grad_not_implemented,
          betainc_gradx)

lgamma_p = standard_unop(_float, 'lgamma')
ad.defjvp(lgamma_p, lambda g, x: mul(g, digamma(x)))

digamma_p = standard_unop(_float, 'digamma')

igamma_p = standard_naryop(
    [_float, _float], 'igamma',
    translation_rule=_broadcast_translate(partial(standard_translate, 'igamma')))
igamma_grad_a_p = standard_naryop([_float, _float], 'igamma_grad_a',
    translation_rule=_broadcast_translate(partial(standard_translate,
                                                 'igamma_grad_a')))

def igamma_gradx(g, a, x):
  return _brcast(g, a, x) * exp(-x + (a - _ones(a)) * log(x) - lgamma(a))

def igamma_grada(g, a, x):
  return _brcast(g, a, x) * igamma_grad_a(a, x)

ad.defjvp(igamma_p, igamma_grada, igamma_gradx)

igammac_p = standard_naryop(
    [_float, _float], 'igammac',
    translation_rule=_broadcast_translate(partial(standard_translate, 'igammac')))

# igammac = 1 - igamma, so its gradients are the negations of igamma's.
def igammac_gradx(g, a, x):
  return -igamma_gradx(g, a, x)

def igammac_grada(g, a, x):
  return -igamma_grada(g, a, x)

ad.defjvp(igammac_p, igammac_grada, igammac_gradx)

bessel_i0e_p = standard_unop(_float, 'bessel_i0e')
ad.defjvp2(bessel_i0e_p, lambda g, y, x: g * (bessel_i1e(x) - sign(x) * y))

bessel_i1e_p = standard_unop(_float, 'bessel_i1e')
def _bessel_i1e_jvp(g, y, x):
  # Guard against division by ~0 near x == 0 by substituting a safe value,
  # then select the known limit (0.5) for tiny inputs.
  eps = dtypes.finfo(_dtype(x)).eps
  x_is_not_tiny = abs(x) > eps
  safe_x = select(x_is_not_tiny, x, full_like(x, eps))
  dy_dx = bessel_i0e(safe_x) - y * (sign(safe_x) + reciprocal(safe_x))
  dy_dx = select(x_is_not_tiny, dy_dx, full_like(x, 0.5))
  return g * dy_dx
ad.defjvp2(bessel_i1e_p, _bessel_i1e_jvp)

erf_p = standard_unop(_float, 'erf')
ad.defjvp(erf_p, lambda g, x: mul(_const(x, 2. / onp.sqrt(onp.pi)),
                                  mul(g, exp(neg(square(x))))))

erfc_p = standard_unop(_float, 'erfc')
ad.defjvp(erfc_p, lambda g, x: mul(_const(x, 2. / onp.sqrt(onp.pi)),
                                   mul(neg(g), exp(neg(square(x))))))

erf_inv_p = standard_unop(_float, 'erf_inv')
ad.defjvp2(erf_inv_p, lambda g, ans, x: mul(_const(x, onp.sqrt(onp.pi) / 2.),
                                            mul(g, exp(square(ans)))))
# Complex-number ops, abs/sqrt, and power primitives.

real_p = unop(_complex_basetype, _complex, 'real')
ad.deflinear(real_p, lambda t: [complex(t, onp.zeros((), _dtype(t)))])

imag_p = unop(_complex_basetype, _complex, 'imag')
ad.defjvp(imag_p, lambda g, _: real(mul(_const(g, -1j), g)))

_complex_dtype = lambda dtype, *args: (onp.zeros((), dtype) + onp.zeros((), onp.complex64)).dtype
complex_p = naryop(_complex_dtype, [_complex_elem_types, _complex_elem_types],
                  'complex')
ad.deflinear(complex_p, lambda t: [real(t), imag(neg(t))])

conj_p = unop(_complex_dtype, _complex_elem_types | _complex, 'conj')

def _conj_transpose_rule(t, x, *, input_dtype):
  # conj is its own linear transpose on complex inputs; for real inputs the
  # cotangent is projected back onto the reals.
  assert ad.is_undefined_primal(x)
  if dtypes.issubdtype(input_dtype, onp.complexfloating):
    return [conj(t)]
  else:
    return [real(t)]

xla.translations[conj_p] = lambda c, x, **kwargs: xops.Conj(x)
ad.primitive_jvps[conj_p] = partial(ad.linear_jvp, conj_p)
ad.primitive_transposes[conj_p] = _conj_transpose_rule

abs_p = unop(_complex_basetype, _num, 'abs')

def _abs_jvp_rule(g, ans, x):
  if _iscomplex(x):
    return _maybe_real(mul(g, div(_maybe_conj(x),
           _replace_zero(convert_element_type(ans, _dtype(x))))))
  else:
    return select(ge(x, _zero(x)), g, neg(g))
ad.defjvp2(abs_p, _abs_jvp_rule)
_maybe_conj = lambda x: conj(x) if _iscomplex(x) else x
_maybe_real = lambda x: real(x) if _iscomplex(x) else x

sqrt_p = standard_unop(_float | _complex, 'sqrt')
ad.defjvp2(sqrt_p, lambda g, ans, x: mul(g, div(_const(x, 0.5), ans)))

rsqrt_p = standard_unop(_float | _complex, 'rsqrt')
ad.defjvp2(rsqrt_p,
           lambda g, ans, x:
           mul(g, mul(_const(x, -0.5), pow(x, _const(x, -1.5)))))

pow_p = standard_naryop([_float | _complex, _float | _complex], 'pow')

def _pow_jvp_lhs(g, ans, x, y):
  # d/dx x**y = y * x**(y-1); the select avoids x**(-1) when y == 0.
  jac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))
  return mul(_brcast(g, y), jac)

def _pow_jvp_rhs(g, ans, x, y):
  # d/dy x**y = log(x) * x**y; _replace_zero guards log(0).
  return mul(_brcast(g, x), mul(log(_replace_zero(x)), ans))

ad.defjvp2(pow_p, _pow_jvp_lhs, _pow_jvp_rhs)

def _integer_pow_dtype_rule(x, *, y):
  dtype = unop_dtype_rule(_identity, _int | _float | _complex, 'integer_pow', x)
  if y < 0 and dtypes.issubdtype(dtype, onp.integer):
    raise TypeError("Integers cannot be raised to negative powers, got "
                    f"integer_pow({x}, {y})")
  return dtype

def _integer_pow_translation_rule(c, x, *, y):
  # Emit exponentiation by squaring; negative exponents become a final
  # reciprocal of the positive power.
  if y == 0:
    shape = c.get_shape(x)
    return xb.constant(c, onp.array(1, dtype=shape.numpy_dtype()))
  is_reciprocal = y < 0
  if is_reciprocal:
    y = -y
  acc = None
  while y > 0:
    if y & 1:
      acc = x if acc is None else xops.Mul(acc, x)
    y >>= 1
    if y > 0:
      x = xops.Mul(x, x)
  return xops.Reciprocal(acc) if is_reciprocal else acc

def _integer_pow_jvp(g, x, *, y):
  return g if y == 0 else mul(g, mul(_const(x, y), integer_pow(x, y - 1)))

integer_pow_p = standard_primitive(
  _attrgetter('shape'), _integer_pow_dtype_rule, 'integer_pow',
  translation_rule=_integer_pow_translation_rule)
batching.defvectorized(integer_pow_p)
masking.defvectorized(integer_pow_p)
ad.defjvp(integer_pow_p, _integer_pow_jvp)

# Substitute 1 where x == 0, used to keep JVP formulas finite.
_replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x)
not_p = standard_unop(_bool_or_int, 'not')
and_p = standard_naryop([_bool_or_int, _bool_or_int], 'and')
ad.defjvp_zero(and_p)
or_p = standard_naryop([_bool_or_int, _bool_or_int], 'or')
ad.defjvp_zero(or_p)
xor_p = standard_naryop([_bool_or_int, _bool_or_int], 'xor')
ad.defjvp_zero(xor_p)
population_count_p = standard_unop(_bool_or_int, 'population_count')
def _add_transpose(t, x, y):
# The following linearity assertion is morally true, but because in some cases we
# instantiate zeros for convenience, it doesn't always hold.
# assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
return [t, t]
add_p = standard_naryop([_num, _num], 'add')
ad.defjvp(add_p, lambda g, x, y: _brcast(g, y), lambda g, x, y: _brcast(g, x))
ad.primitive_transposes[add_p] = _add_transpose
def _sub_transpose(t, x, y):
  """Transpose of sub: cotangent passes to x and is negated for y.

  (A linearity assertion on x/y is morally true but omitted, since zeros are
  sometimes instantiated for convenience and it would not always hold.)
  """
  neg_t = neg(t) if t is not ad_util.zero else ad_util.zero
  return [t, neg_t]
sub_p = standard_naryop([_num, _num], 'sub')
# d/dx (x - y) = g; d/dy (x - y) = -g, each broadcast to the other operand.
ad.defjvp(sub_p,
          lambda g, x, y: _brcast(g, y),
          lambda g, x, y: _brcast(neg(g), x))
ad.primitive_transposes[sub_p] = _sub_transpose
mul_p = standard_naryop([_num, _num], 'mul')
# mul is bilinear: each partial transpose is a mul against the other operand.
ad.defbilinear_broadcasting(_brcast, mul_p, mul, mul)
def _div_transpose_rule(cotangent, x, y):
  """Transpose of div: linear in x only, so y must be a known primal."""
  assert ad.is_undefined_primal(x) and not ad.is_undefined_primal(y)
  if cotangent is ad_util.zero:
    return ad_util.zero, None
  return div(cotangent, y), None
div_p = standard_naryop([_num, _num], 'div')
# d/dx (x/y) = g/y; d/dy (x/y) = -g*x/y**2 (the y**-2 via integer_pow).
ad.defjvp(div_p,
          lambda g, x, y: div(_brcast(g, y), y),
          lambda g, x, y: mul(mul(neg(_brcast(g, x)), x), integer_pow(y, -2)))
ad.primitive_transposes[div_p] = _div_transpose_rule
rem_p = standard_naryop([_num, _num], 'rem')
# Partial derivatives of rem: 1 wrt x, -floor(x/y) wrt y.
ad.defjvp(rem_p,
          lambda g, x, y: _brcast(g, y),
          lambda g, x, y: mul(_brcast(neg(g), x), floor(div(x, y))))
def _broadcasting_select(c, which, x, y):
  """Wrapper around XLA `Select` that broadcasts its arguments."""
  shapes = [c.get_shape(v).dimensions() for v in (which, x, y)]
  out_shape = broadcast_shapes(*shapes)
  out_rank = len(out_shape)
  def _bcast(v, shape):
    # Align trailing dimensions, numpy-style.
    dims = tuple(range(out_rank - len(shape), out_rank))
    return xops.BroadcastInDim(v, out_shape, dims)
  which, x, y = (_bcast(v, s) for v, s in zip((which, x, y), shapes))
  return xops.Select(which, x, y)
def _minmax_translation_rule(c, x, y, *, minmax=None, cmp=None):
  """Lower min/max; complex operands compare lexicographically: by real part,
  with the imaginary parts breaking ties."""
  dtype = c.get_shape(x).numpy_dtype()
  if dtypes.issubdtype(dtype, onp.complexfloating):
    rx = xops.Real(x)
    ry = xops.Real(y)
    return _broadcasting_select(
        c, xops.Select(xops.Eq(rx, ry), cmp(xops.Imag(x), xops.Imag(y)),
                       cmp(rx, ry)),
        x, y)
  return minmax(x, y)
# max/min use the custom translation above so complex numbers compare
# lexicographically (real part first, imaginary part on ties).
max_p = standard_naryop([_any, _any], 'max', translation_rule=partial(
    _minmax_translation_rule, minmax=xops.Max, cmp=xops.Gt))
# Tangent is routed through the selected operand via _balanced_eq (defined
# elsewhere; presumably splits the tangent between tied operands).
ad.defjvp2(max_p,
           lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
           lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
min_p = standard_naryop([_any, _any], 'min', translation_rule=partial(
    _minmax_translation_rule, minmax=xops.Min, cmp=xops.Lt))
ad.defjvp2(min_p,
           lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
           lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
# Bit shifts are integer-valued: zero JVP.
shift_left_p = standard_naryop([_int, _int], 'shift_left')
ad.defjvp_zero(shift_left_p)
shift_right_arithmetic_p = standard_naryop([_int, _int], 'shift_right_arithmetic')
ad.defjvp_zero(shift_right_arithmetic_p)
shift_right_logical_p = standard_naryop([_int, _int], 'shift_right_logical')
ad.defjvp_zero(shift_right_logical_p)
# Comparisons accept any dtype, always return bool, and have zero JVP.
eq_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'eq')
ad.defjvp_zero(eq_p)
ne_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'ne')
ad.defjvp_zero(ne_p)
ge_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'ge')
ad.defjvp_zero(ge_p)
gt_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'gt')
ad.defjvp_zero(gt_p)
le_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'le')
ad.defjvp_zero(le_p)
lt_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'lt')
ad.defjvp_zero(lt_p)
def _convert_element_type_shape_rule(operand, *, new_dtype, old_dtype):
return operand.shape
def _convert_element_type_dtype_rule(operand, *, new_dtype, old_dtype):
return new_dtype
def _convert_element_type_translation_rule(c, operand, *, new_dtype, old_dtype):
  """Lower to XLA ConvertElementType; a complex-to-non-complex cast takes the
  real part first."""
  if (dtypes.issubdtype(old_dtype, onp.complexfloating) and
      not dtypes.issubdtype(new_dtype, onp.complexfloating)):
    operand = xops.Real(operand)
  new_etype = xla_client.dtype_to_etype(new_dtype)
  return xops.ConvertElementType(operand, new_element_type=new_etype)
def _convert_element_type_transpose_rule(t, *, new_dtype, old_dtype):
  """Transpose of the (linear) cast: cast the cotangent back to old_dtype."""
  assert t.dtype == new_dtype, (t.dtype, new_dtype)
  return [convert_element_type_p.bind(t, new_dtype=old_dtype,
                                      old_dtype=new_dtype)]
convert_element_type_p = standard_primitive(
    _convert_element_type_shape_rule, _convert_element_type_dtype_rule,
    'convert_element_type', _convert_element_type_translation_rule)
# The cast is linear; its transpose is the inverse cast (rule above).
ad.deflinear(convert_element_type_p, _convert_element_type_transpose_rule)
batching.defvectorized(convert_element_type_p)
masking.defvectorized(convert_element_type_p)
def _bitcast_convert_type_shape_rule(operand, *, new_dtype):
return operand.shape
def _bitcast_convert_type_dtype_rule(operand, *, new_dtype):
return new_dtype
def _bitcast_convert_type_translation_rule(c, operand, *, new_dtype):
  """Lower to XLA BitcastConvertType (reinterpret the bits as new_dtype)."""
  new_etype = xla_bridge.dtype_to_etype(new_dtype)
  return xops.BitcastConvertType(operand, new_element_type=new_etype)
bitcast_convert_type_p = standard_primitive(
    _bitcast_convert_type_shape_rule, _bitcast_convert_type_dtype_rule,
    'bitcast_convert_type', _bitcast_convert_type_translation_rule)
# Bit reinterpretation is not differentiable: zero JVP.
ad.defjvp_zero(bitcast_convert_type_p)
batching.defvectorized(bitcast_convert_type_p)
masking.defvectorized(bitcast_convert_type_p)
def _conv_general_dilated_shape_rule(
    lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, feature_group_count, batch_group_count,
    **unused_kwargs):
  """Shape rule for conv_general_dilated.

  Validates the feature/batch group counts against the operand shapes, then
  computes the output shape by permuting into the canonical layout, dilating,
  applying the window/padding arithmetic, and permuting back to the layout
  given by the output spec.

  Raises:
    ValueError: if the group counts are non-positive, inconsistent with the
      operand shapes, or both greater than one.
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  if not feature_group_count > 0:
    msg = ("conv_general_dilated feature_group_count "
           "must be a positive integer, got {}.")
    raise ValueError(msg.format(feature_group_count))
  lhs_feature_count = lhs.shape[dimension_numbers.lhs_spec[1]]
  quot, rem = divmod(lhs_feature_count, feature_group_count)
  if rem:
    msg = ("conv_general_dilated feature_group_count must divide lhs feature "
           "dimension size, but {} does not divide {}.")
    raise ValueError(msg.format(feature_group_count, lhs_feature_count))
  if quot != rhs.shape[dimension_numbers.rhs_spec[1]]:
    msg = ("conv_general_dilated lhs feature dimension size divided by "
           "feature_group_count must equal the rhs input feature dimension "
           "size, but {} // {} != {}.")
    raise ValueError(msg.format(lhs_feature_count, feature_group_count,
                                rhs.shape[dimension_numbers.rhs_spec[1]]))
  if rhs.shape[dimension_numbers.rhs_spec[0]] % feature_group_count:
    msg = ("conv_general_dilated rhs output feature dimension size must be a "
           "multiple of feature_group_count, but {} is not a multiple of {}.")
    raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
                                feature_group_count))
  if not batch_group_count > 0:
    msg = ("conv_general_dilated batch_group_count "
           "must be a positive integer, got {}.")
    raise ValueError(msg.format(batch_group_count))
  lhs_batch_count = lhs.shape[dimension_numbers.lhs_spec[0]]
  if lhs_batch_count % batch_group_count != 0:
    msg = ("conv_general_dilated batch_group_count must divide lhs batch "
           "dimension size, but {} does not divide {}.")
    raise ValueError(msg.format(batch_group_count, lhs_batch_count))
  # BUG FIX: this check previously divided by feature_group_count (duplicating
  # the feature-group check above) and referenced a misspelled
  # `batch_ground_count` in the error arguments, which raised NameError.
  if rhs.shape[dimension_numbers.rhs_spec[0]] % batch_group_count:
    msg = ("conv_general_dilated rhs output feature dimension size must be a "
           "multiple of batch_group_count, but {} is not a multiple of {}.")
    raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
                                batch_group_count))
  # BUG FIX: this previously read `not batch_group_count > 0 and
  # feature_group_count > 0`, which can never be true after the positivity
  # checks above. The documented constraint (see the error message) is that
  # at most one of the two group counts may exceed 1.
  if batch_group_count > 1 and feature_group_count > 1:
    msg = ("At most one of batch_group_count and feature_group_count may be > "
           "1, got batch_group_count={} and feature_group_count={}")
    raise ValueError(msg.format(batch_group_count, feature_group_count))
  lhs_perm, rhs_perm, out_perm = dimension_numbers
  lhs_trans = _dilate_shape(onp.take(lhs.shape, lhs_perm), lhs_dilation)
  rhs_trans = _dilate_shape(onp.take(rhs.shape, rhs_perm), rhs_dilation)
  out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding,
                               batch_group_count)
  return tuple(onp.take(out_trans, onp.argsort(out_perm)))
def _conv_general_dilated_dtype_rule(
    lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, **unused_kwargs):
  """Dtype rule: both operands must be floating point with matching dtypes;
  the result dtype matches the inputs."""
  return naryop_dtype_rule(_input_dtype, [_float, _float],
                           'conv_general_dilated', lhs, rhs)
# Swap the first two entries of a conv dimension spec, keeping spatial dims.
_conv_spec_transpose = lambda spec: (spec[1], spec[0]) + spec[2:]
# The spatial dimensions of a conv dimension spec.
_conv_sdims = lambda spec: spec[2:]
# Understanding the convolution transpose rules:
# Ignoring the spatial dimensions, let m = batch, j = input feature,
# k = output feature.
#
# Convolution computes the following contraction:
# Forward: [m, j] [j, k] -> [m, k]
#
# The transposes are similar to the rules for transposing a matmul:
# LHS transpose: [m, k] [k, j] -> [m, j]
# RHS transpose: [j, m] [m, k] -> [j, k]
#
# With feature grouping, we have the following signatures:
# Forward: [m, gj] [j, gk] -> [m, gk]
# LHS transpose: [m, gk] [k, gj] -> [m, gj]
# --> implemented as feature grouping after transposing the group from the
# kernel input features to the kernel output features.
# RHS transpose: [gj, m] [m, gk] -> [j, gk]
# --> which is batch grouping.
#
# With batch grouping, we have the following signatures:
# Forward: [gm,j] [j,gk]->[m,gk]
# LHS transpose: [m, gk][gk, j] -> [gm, j]
# --> implemented as feature grouping with transposing the group on the kernel
# and the output.
# RHS transpose: [j, gm][m, gk] -> [j, gk]
# --> which is feature grouping.
def _conv_general_dilated_transpose_lhs(
    g, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, feature_group_count, batch_group_count,
    lhs_shape, rhs_shape, precision):
  """Transpose rule wrt the lhs: convolve the cotangent `g` with the
  spatially-reversed kernel, swapping the roles of window_strides and
  lhs_dilation. See the comment block above on conv transpose rules for how
  the group counts are handled.
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  assert batch_group_count == 1 or feature_group_count == 1
  lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  t_rhs_spec = _conv_spec_transpose(rhs_spec)
  if feature_group_count > 1:
    # in addition to switching the dims in the spec, need to move the feature
    # group axis into the transposed rhs's output feature dim
    rhs = _reshape_axis_out_of(rhs_spec[0], feature_group_count, rhs)
    rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
  elif batch_group_count > 1:
    # batch grouping on the forward pass becomes feature grouping here.
    rhs = _reshape_axis_out_of(rhs_spec[0], batch_group_count, rhs)
    rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
    feature_group_count = batch_group_count
  trans_dimension_numbers = ConvDimensionNumbers(out_spec, t_rhs_spec, lhs_spec)
  # VJP padding for the lhs cotangent (helper defined elsewhere in the file).
  padding = _conv_general_vjp_lhs_padding(
      onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims),
      window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,
      rhs_dilation)
  revd_weights = rev(rhs, rhs_sdims)
  out = conv_general_dilated(
      g, revd_weights, window_strides=lhs_dilation, padding=padding,
      lhs_dilation=window_strides, rhs_dilation=rhs_dilation,
      dimension_numbers=trans_dimension_numbers,
      feature_group_count=feature_group_count,
      batch_group_count=1, precision=precision)
  if batch_group_count > 1:
    # Move the group factor from the feature dim back to the batch dim.
    out = _reshape_axis_out_of(lhs_spec[1], batch_group_count, out)
    out = _reshape_axis_into(lhs_spec[1], lhs_spec[0], out)
  return out
def _conv_general_dilated_transpose_rhs(
    g, lhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers: ConvDimensionNumbers, feature_group_count: int,
    batch_group_count: int, lhs_shape, rhs_shape, precision):
  """Transpose rule wrt the rhs: convolve the lhs with the cotangent `g`,
  with lhs/out dimension specs transposed and the two group counts swapped
  (see the comment block above on conv transpose rules).
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  if onp.size(g) == 0:
    # Avoids forming degenerate convolutions where the RHS has spatial size 0.
    return ad_util.zero
  lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
  lhs_trans, rhs_trans, out_trans = map(_conv_spec_transpose, dimension_numbers)
  assert batch_group_count == 1 or feature_group_count == 1
  if batch_group_count > 1:
    feature_group_count = batch_group_count
    batch_group_count = 1
  elif feature_group_count > 1:
    batch_group_count = feature_group_count
    feature_group_count = 1
  trans_dimension_numbers = ConvDimensionNumbers(lhs_trans, out_trans, rhs_trans)
  # VJP padding for the rhs cotangent (helper defined elsewhere in the file).
  padding = _conv_general_vjp_rhs_padding(
      onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims),
      window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,
      rhs_dilation)
  return conv_general_dilated(
      lhs, g, window_strides=rhs_dilation, padding=padding,
      lhs_dilation=lhs_dilation, rhs_dilation=window_strides,
      dimension_numbers=trans_dimension_numbers,
      feature_group_count=feature_group_count,
      batch_group_count=batch_group_count, precision=precision)
def _conv_general_dilated_translation_rule(
    c, lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, feature_group_count, batch_group_count, precision,
    **unused_kwargs):
  """Lower to XLA's ConvGeneralDilated op, converting the dimension numbers
  via _conv_general_proto (defined elsewhere in the file)."""
  assert type(dimension_numbers) is ConvDimensionNumbers
  dimension_numbers = _conv_general_proto(dimension_numbers)
  return xops.ConvGeneralDilated(lhs, rhs, window_strides, padding, lhs_dilation,
                                 rhs_dilation, dimension_numbers,
                                 feature_group_count, batch_group_count,
                                 precision_config=_precision_config(precision))
def _conv_general_dilated_batch_rule(
    batched_args, batch_dims, *, window_strides, padding,
    lhs_dilation, rhs_dilation, dimension_numbers,
    feature_group_count, batch_group_count, precision, **unused_kwargs):
  """Batch rule for conv_general_dilated.

  The vmapped axis is folded into an existing conv dimension (lhs batch or
  feature dim, rhs output-feature dim) via reshapes and scaled group counts
  so a single unbatched conv computes the batched result; the output is then
  reshaped to split the vmapped axis back out.
  """
  assert batch_group_count == 1 or feature_group_count == 1
  lhs, rhs = batched_args
  lhs_bdim, rhs_bdim = batch_dims
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  if lhs_bdim is not None and rhs_bdim is not None:
    # Both operands batched: fold the batch axis into a grouped dimension on
    # each side and scale the corresponding group count.
    assert lhs.shape[lhs_bdim] == rhs.shape[rhs_bdim]
    if batch_group_count > 1:
      new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
      batch_group_count *= lhs.shape[lhs_bdim]
    else:
      new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[1], lhs)
      feature_group_count *= lhs.shape[lhs_bdim]
    new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
    out = conv_general_dilated(
      new_lhs, new_rhs, window_strides, padding, lhs_dilation, rhs_dilation,
      dimension_numbers, feature_group_count=feature_group_count,
      batch_group_count=batch_group_count,
      precision=precision)
    out = _reshape_axis_out_of(out_spec[1], lhs.shape[lhs_bdim], out)
    return out, out_spec[1]
  elif lhs_bdim is not None:
    # Only lhs batched: fold the batch axis into the lhs batch dimension.
    if batch_group_count == 1:
      new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
      out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, precision=precision)
      out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
      return out, out_spec[0]
    else:
      # With batch grouping, groups must stay outermost within the batch dim,
      # so insert the vmapped axis inside the group factor before merging.
      new_lhs = _reshape_axis_out_of(lhs_spec[0] + int(lhs_bdim <= lhs_spec[0]),
                                     batch_group_count, lhs)
      new_lhs = _reshape_axis_into(lhs_bdim + int(lhs_spec[0] < lhs_bdim),
                                   lhs_spec[0] + 1,
                                   new_lhs)
      new_lhs = _reshape_axis_into(lhs_spec[0], lhs_spec[0], new_lhs)
      out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, batch_group_count,
                                 precision=precision)
      out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
      return out, out_spec[0]
  elif rhs_bdim is not None:
    if feature_group_count == 1 and batch_group_count == 1:
      # Only rhs batched, no grouping: fold into the rhs output-feature dim.
      new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
      out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, batch_group_count,
                                 precision=precision)
      out = _reshape_axis_out_of(out_spec[1], rhs.shape[rhs_bdim], out)
      return out, out_spec[1]
    else:
      # groups need to be outermost, so we need to factor them out of the
      # rhs output feature dim, then factor the batch dim into the remaining rhs
      # output feature dim, then put groups back in. We do something
      # similar on the output. An alternative which would require more FLOPs but
      # fewer reshapes would be to broadcast lhs.
      group_count = (feature_group_count if feature_group_count > 1
                     else batch_group_count)
      new_rhs = _reshape_axis_out_of(rhs_spec[0] + int(rhs_bdim <= rhs_spec[0]),
                                     group_count, rhs)
      new_rhs = _reshape_axis_into(rhs_bdim + int(rhs_spec[0] < rhs_bdim),
                                   rhs_spec[0] + 1,
                                   new_rhs)
      new_rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[0], new_rhs)
      out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, batch_group_count,
                                 precision=precision)
      out = _reshape_axis_out_of(out_spec[1], group_count, out)
      out = _reshape_axis_out_of(out_spec[1] + 1, rhs.shape[rhs_bdim], out)
      out = _reshape_axis_into(out_spec[1], out_spec[1] + 1, out)
      return out, out_spec[1]
conv_general_dilated_p = standard_primitive(
    _conv_general_dilated_shape_rule, _conv_general_dilated_dtype_rule,
    'conv_general_dilated', _conv_general_dilated_translation_rule)
# conv is bilinear in (lhs, rhs); each partial transpose is itself a conv.
ad.defbilinear(conv_general_dilated_p,
               _conv_general_dilated_transpose_lhs,
               _conv_general_dilated_transpose_rhs)
batching.primitive_batchers[conv_general_dilated_p] = \
    _conv_general_dilated_batch_rule
def _reshape_axis_into(src, dst, x):
  """Merge axis `src` of `x` into axis `dst` (position counted with `src`
  removed), multiplying the destination dim by the source dim's size."""
  others = [axis for axis in range(x.ndim) if axis != src]
  perm = others[:dst] + [src] + others[dst:]
  merged_shape = list(onp.delete(x.shape, src))
  merged_shape[dst] *= x.shape[src]
  return reshape(x, merged_shape, perm)
def _reshape_axis_out_of(src, size1, x):
  """Split axis `src` of `x` into two axes (size1, shape[src] // size1);
  shape[src] must be divisible by size1."""
  new_shape = list(x.shape)
  size2, ragged = divmod(new_shape[src], size1)
  assert not ragged
  new_shape[src:src + 1] = [size1, size2]
  return reshape(x, new_shape)
def _precision_config(precision):
  """Build an XLA PrecisionConfig applying `precision` to both operands, or
  return None when no precision was requested."""
  if precision is None:
    return None
  config = xla_client.PrecisionConfig()
  config.operand_precision.extend((precision, precision))
  return config
def _dot_general_shape_rule(lhs, rhs, *, dimension_numbers, precision):
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
if len(lhs_batch) != len(rhs_batch):
msg = ("dot_general requires equal numbers of lhs_batch and rhs_batch "
"dimensions, got lhs_batch {} and rhs_batch {}.")
raise TypeError(msg.format(lhs_batch, rhs_batch))
if not onp.all(onp.equal(lhs_batch, rhs_batch)):
msg = ("dot_general requires same lhs and rhs batch dimension numbers, "
"got {} and {}.")
raise TypeError(msg.format(lhs_batch, rhs_batch))
lhs_batch_shape = onp.take(lhs.shape, lhs_batch)
rhs_batch_shape = onp.take(rhs.shape, rhs_batch)
if not onp.all(onp.equal(lhs_batch_shape, rhs_batch_shape)):
msg = ("dot_general requires lhs batch dimensions and rhs batch dimensions "
"to have the same shape, got {} and {}.")
raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape))
if tuple(sorted(lhs_batch)) != tuple(range(len(lhs_batch))):
msg = ("dot_general requires lhs batch dimensions to precede contracting "
"and non-contracting dimensions, got lhs_batch {}.")
raise TypeError(msg.format(lhs_batch))
if tuple(sorted(rhs_batch)) != tuple(range(len(rhs_batch))):
msg = ("dot_general requires rhs batch dimensions to precede contracting "
"and non-contracting dimensions, got rhs_batch {}.")
raise TypeError(msg.format(rhs_batch))
lhs_contracting_shape = onp.take(lhs.shape, lhs_contracting)
rhs_contracting_shape = onp.take(rhs.shape, rhs_contracting)
if not onp.all(onp.equal(lhs_contracting_shape, rhs_contracting_shape)):
msg = ("dot_general requires contracting dimensions to have the same "
"shape, got {} and {}.")
raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
batch_shape = tuple(onp.take(lhs.shape, lhs_batch))
lhs_contract_or_batch = tuple(lhs_contracting) + tuple(lhs_batch)
lhs_tensored_shape = tuple(onp.delete(lhs.shape, lhs_contract_or_batch))
rhs_contract_or_batch = tuple(rhs_contracting) + tuple(rhs_batch)
rhs_tensored_shape = tuple(onp.delete(rhs.shape, rhs_contract_or_batch))
return batch_shape + lhs_tensored_shape + rhs_tensored_shape
def _dot_general_dtype_rule(lhs, rhs, *, dimension_numbers, precision):
  """Dtype rule: both operands must be numeric with matching dtypes."""
  return naryop_dtype_rule(_input_dtype, [_num, _num], 'dot_general', lhs, rhs)
def _dot_general_transpose_lhs(g, y, *, dimension_numbers, precision,
                               swap_ans=False):
  """Transpose of dot_general wrt the lhs: contract the cotangent `g` with
  `y` over y's kept (non-contracting, non-batch) dims, then permute the
  result back into the lhs dimension order. With swap_ans=True, g's layout
  is taken to have y's kept dims before x's (used by the rhs transpose).
  """
  (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
  # Recover the lhs rank from g's and y's ranks.
  x_ndim = g.ndim - y.ndim + len(x_batch) + 2 * len(x_contract)
  x_kept = remaining(range(x_ndim), x_contract, x_batch)
  y_kept = remaining(range(y.ndim), y_contract, y_batch)
  if swap_ans:
    ans_batch, ans_y, _ = ranges_like(x_batch, y_kept, x_kept)
  else:
    ans_batch, _, ans_y = ranges_like(x_batch, x_kept, y_kept)
  dims = ((ans_y, y_kept), (ans_batch, y_batch))
  # Contracted dims come back ordered by y's contraction order; restore x's.
  x_contract_sorted_by_y = list(onp.take(x_contract, onp.argsort(y_contract)))
  out_axes = onp.argsort(list(x_batch) + x_kept + x_contract_sorted_by_y)
  return transpose(dot_general(g, y, dims, precision=precision),
                   tuple(out_axes))
def _dot_general_transpose_rhs(g, x, *, dimension_numbers, precision):
  """Transpose of dot_general wrt the rhs, by swapping the lhs/rhs roles in
  the dimension numbers and reusing the lhs transpose rule."""
  (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
  swapped = ((y_contract, x_contract), (y_batch, x_batch))
  return _dot_general_transpose_lhs(
      g, x, dimension_numbers=swapped, precision=precision, swap_ans=True)
def _dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,
                            precision):
  """Batch rule for dot_general: when both operands are batched the vmapped
  axis becomes a new leading batch dimension; when only one is batched it
  becomes a tensor product dimension of that operand."""
  # there are three kinds of dimensions in a dot_general:
  # - contraction dimensions appear in lhs and rhs but not the result
  # - batch dimensions appear in lhs, rhs, and result
  # - tensor product dimensions appear in the result and one of lhs or rhs
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  lhs, rhs = batched_args
  lbd, rbd = batch_dims
  assert lbd is not None or rbd is not None
  if lbd is not None and rbd is not None:
    # adding a batch dimension
    if lbd != 0:
      lhs = batching.moveaxis(lhs, lbd, 0)
    if rbd != 0:
      rhs = batching.moveaxis(rhs, rbd, 0)
    lhs_batch = (0,) + tuple(onp.add(1, lhs_batch))
    rhs_batch = (0,) + tuple(onp.add(1, rhs_batch))
    lhs_contract = tuple(onp.add(1, lhs_contract))
    rhs_contract = tuple(onp.add(1, rhs_contract))
    result_batch_dim = 0
  else:
    # adding a tensor product dimension
    if lbd is not None:
      if lhs_batch == () or lbd > onp.max(lhs_batch):
        # can avoid transposes
        bump_lhs_contract = onp.greater_equal(lhs_contract, lbd)
        lhs_contract = tuple(onp.add(lhs_contract, bump_lhs_contract))
        result_batch_dim = lbd - len(lhs_contract) + sum(bump_lhs_contract)
      else:
        # move the new dimension to the end of lhs to avoid changing batch dims
        lhs = batching.moveaxis(lhs, lbd, lhs.ndim - 1)
        # lhs tensor product dims in result come after batch dims
        result_batch_dim = lhs.ndim - len(lhs_contract) - 1
    else:
      if rhs_batch == () or rbd > onp.max(rhs_batch):
        # can avoid transposes
        bump_rhs_contract = onp.greater_equal(rhs_contract, rbd)
        rhs_contract = tuple(onp.add(rhs_contract, bump_rhs_contract))
        result_batch_dim = (rbd + (lhs.ndim - len(lhs_contract) - len(lhs_batch))
                            - (len(rhs_contract) - sum(bump_rhs_contract)))
      else:
        # move the new dimension to the end of rhs to avoid changing batch dims
        rhs = batching.moveaxis(rhs, rbd, rhs.ndim - 1)
        # rhs tensor product dims in result come after batch dims + lhs tensor
        # product dims
        result_batch_dim = (lhs.ndim - len(lhs_contract) - len(lhs_batch) +
                            rhs.ndim - len(rhs_contract) - 1)
  new_dimension_numbers = [(lhs_contract, rhs_contract), (lhs_batch, rhs_batch)]
  batched_out = dot_general(lhs, rhs, new_dimension_numbers,
                            precision=precision)
  return batched_out, int(result_batch_dim)
def _dot_general_translation_rule(c, lhs, rhs, *, dimension_numbers, precision):
  """Lower dot_general directly to XLA's DotGeneral op."""
  return xops.DotGeneral(lhs, rhs,
                         xc.make_dot_dimension_numbers(dimension_numbers),
                         precision_config=_precision_config(precision))
def _dot_general_masking_rule(padded_vals, logical_shapes, *, dimension_numbers,
                              precision):
  """Masking rule: zero out lhs entries beyond the logical extent of each
  contracting dimension so padding cannot contribute to the contraction."""
  lhs, rhs = padded_vals
  lhs_shape, rhs_shape = logical_shapes
  lhs_ndim, rhs_ndim = len(lhs_shape), len(rhs_shape)
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  # we need only mask the lhs contraction dimensions
  if len(lhs_contract) == 0:
    return dot_general(lhs, rhs, dimension_numbers, precision=precision)
  else:
    # True where the index along each contracting dim is within logical bounds.
    masks = [broadcasted_iota(onp.int32, lhs.shape, d) < lhs_shape[d]
             for d in lhs_contract]
    mask_intersection = masks[0]
    for mask in masks[1:]:
      mask_intersection &= mask
    masked_lhs = select(mask_intersection, lhs, zeros_like_array(lhs))
    return dot_general(masked_lhs, rhs, dimension_numbers, precision=precision)
dot_general_p = standard_primitive(_dot_general_shape_rule,
                                   _dot_general_dtype_rule, 'dot_general',
                                   _dot_general_translation_rule)
# dot_general is bilinear; its transposes are themselves dot_generals.
ad.defbilinear(dot_general_p,
               _dot_general_transpose_lhs, _dot_general_transpose_rhs)
batching.primitive_batchers[dot_general_p] = _dot_general_batch_rule
masking.masking_rules[dot_general_p] = _dot_general_masking_rule
def _broadcast_shape_rule(operand, sizes):
  """Result shape of broadcast: `sizes` prepended to the operand's shape."""
  _check_shapelike('broadcast', 'sizes', sizes)
  return tuple(sizes) + operand.shape
def _broadcast_batch_rule(batched_args, batch_dims, *, sizes):
  """Batch rule for broadcast: prepending len(sizes) dims shifts an existing
  batch dim right by len(sizes)."""
  operand, = batched_args
  bdim, = batch_dims
  shifted_bdim = None if bdim is None else bdim + len(sizes)
  return broadcast(operand, sizes), shifted_bdim
broadcast_p = standard_primitive(
    _broadcast_shape_rule, _input_dtype, 'broadcast')
# broadcast is linear; its transpose sums over the prepended dimensions.
ad.deflinear(broadcast_p, lambda t, sizes: [_reduce_sum(t, range(len(sizes)))])
batching.primitive_batchers[broadcast_p] = _broadcast_batch_rule
def _broadcast_in_dim_impl(operand, *, shape, broadcast_dimensions):
  """Impl override: on a DeviceArray, record the broadcast as a lazy
  expression on the existing buffer (no data movement); otherwise fall back
  to the standard primitive application."""
  if type(operand) is xla.DeviceArray:
    shape = _broadcast_in_dim_shape_rule(
      operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
    aval = ShapedArray(shape, _dtype(operand))
    lazy_expr = lazy.broadcast(operand._lazy_expr, shape, broadcast_dimensions)
    return xla.DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
  else:
    return xla.apply_primitive(broadcast_in_dim_p, operand, shape=shape,
                               broadcast_dimensions=broadcast_dimensions)
def _broadcast_in_dim_shape_rule(operand, *, shape, broadcast_dimensions):
  """Validate broadcast_in_dim and return the target `shape`.

  Operand dim i maps to output dim broadcast_dimensions[i]; each mapped
  operand dim must be 1 or equal to the corresponding target dim, and the
  mapping must be strictly increasing within the output's rank.

  Raises:
    TypeError: if any of the above constraints is violated.
  """
  _check_shapelike('broadcast_in_dim', 'shape', shape)
  _check_shapelike('broadcast_in_dim', 'broadcast_dimensions',
                   broadcast_dimensions)
  operand_ndim = onp.ndim(operand)
  if operand_ndim != len(broadcast_dimensions):
    msg = ('broadcast_in_dim broadcast_dimensions must have length equal to '
           'operand ndim; got broadcast_dimensions {} for operand ndim {}.')
    raise TypeError(msg.format(broadcast_dimensions, operand_ndim))
  if len(shape) < operand_ndim:
    msg = ('broadcast_in_dim target broadcast shape must have equal or higher rank '
           'to the operand shape; got operand ndim {} and target broadcast ndim {}.')
    raise TypeError(msg.format(operand_ndim, len(shape)))
  if not set(broadcast_dimensions).issubset(set(range(len(shape)))):
    msg = ('broadcast_in_dim broadcast_dimensions must be a subset of output '
           'dimensions, got {} for operand ndim {} and shape {}.')
    raise TypeError(msg.format(broadcast_dimensions, operand_ndim, shape))
  if any(operand.shape[i] != 1 and operand.shape[i] != shape[broadcast_dimensions[i]]
         for i in range(operand_ndim)):
    msg = ('broadcast_in_dim operand dimension sizes must either be 1, or be '
           'equal to their corresponding dimensions in the target broadcast shape; '
           'got operand of shape {}, target broadcast shape {}, '
           'broadcast_dimensions {} ')
    raise TypeError(msg.format(operand.shape, shape, broadcast_dimensions))
  if (len(broadcast_dimensions) != len(set(broadcast_dimensions)) or
      tuple(broadcast_dimensions) != tuple(sorted(broadcast_dimensions))):
    msg = ('broadcast_in_dim broadcast_dimensions must be strictly increasing; '
           'got broadcast_dimensions {}')
    raise TypeError(msg.format(broadcast_dimensions))
  return shape
def _broadcast_in_dim_transpose_rule(t, *, shape, broadcast_dimensions):
  """Transpose: sum the cotangent over every output dim that was newly
  introduced (i.e. not mapped from an operand dimension)."""
  introduced = tuple(onp.delete(range(len(shape)), broadcast_dimensions))
  return [_reduce_sum(t, introduced)]
def _broadcast_in_dim_batch_rule(batched_args, batch_dims, *, shape,
                                 broadcast_dimensions):
  """Batch rule: move the batch axis to the front, prepend it to the target
  shape, and shift every broadcast dimension right by one."""
  operand, = batched_args
  bdim, = batch_dims
  moved = batching.moveaxis(operand, bdim, 0)
  out_shape = (operand.shape[bdim],) + shape
  out_bcast_dims = (0,) + tuple(d + 1 for d in broadcast_dimensions)
  return broadcast_in_dim(moved, out_shape, out_bcast_dims), 0
broadcast_in_dim_p = standard_primitive(
    _broadcast_in_dim_shape_rule, _input_dtype, 'broadcast_in_dim')
# Custom impl so DeviceArrays can broadcast lazily (no data movement).
broadcast_in_dim_p.def_impl(_broadcast_in_dim_impl)
ad.deflinear(broadcast_in_dim_p, _broadcast_in_dim_transpose_rule)
batching.primitive_batchers[broadcast_in_dim_p] = _broadcast_in_dim_batch_rule
def _clamp_shape_rule(min, operand, max):
if min.shape and min.shape != operand.shape:
m = "clamp requires min.shape == operand.shape or min.shape == (), got {}."
raise TypeError(m.format(min.shape))
if max.shape and max.shape != operand.shape:
m = "clamp requires max.shape == operand.shape or max.shape == (), got {}."
raise TypeError(m.format(max.shape))
return operand.shape
_clamp_dtype_rule = partial(naryop_dtype_rule, _input_dtype, [_any, _any, _any],
                            'clamp')
clamp_p = standard_primitive(_clamp_shape_rule, _clamp_dtype_rule, 'clamp')
# The tangent flows through whichever argument is active: min when it binds
# (min > operand and min < max), the operand when it lies inside the bounds,
# and max when it binds (max < operand); elsewhere the tangent is zero.
ad.defjvp(clamp_p,
          lambda g, min, operand, max:
          select(bitwise_and(gt(min, operand), lt(min, max)),
                 _brcast(g, operand), _zeros(operand)),
          lambda g, min, operand, max:
          select(bitwise_and(gt(operand, min), lt(operand, max)),
                 g, _zeros(operand)),
          lambda g, min, operand, max:
          select(lt(max, operand), _brcast(g, operand), _zeros(operand)))
def _concatenate_shape_rule(*operands, **kwargs):
  """Shape rule for concatenate: all operands must be arrays of equal rank
  agreeing on every dimension except `dimension`, which is summed.

  Raises:
    TypeError: on zero operands, non-array operands, rank mismatch, an
      out-of-bounds dimension, or mismatched non-concatenated dimensions.
  """
  dimension = kwargs.pop('dimension')
  if not operands:
    msg = "concatenate expects at least one operand, got 0."
    raise TypeError(msg)
  if not all(isinstance(operand, UnshapedArray) for operand in operands):
    msg = "All objects to concatenate must be arrays, got {}."
    op = next(op for op in operands if not isinstance(op, UnshapedArray))
    raise TypeError(msg.format(type(op)))
  if len(set(operand.ndim for operand in operands)) != 1:
    msg = "Cannot concatenate arrays with different ranks, got {}."
    raise TypeError(msg.format(", ".join(str(o.ndim) for o in operands)))
  shapes = onp.array([operand.shape for operand in operands])
  if not 0 <= dimension < shapes.shape[1]:
    msg = "concatenate dimension out of bounds: dimension {} for shapes {}."
    raise TypeError(msg.format(dimension, ", ".join(map(str, shapes))))
  if not onp.all(onp.delete(shapes[0] == shapes, dimension, axis=1)):
    msg = ("Cannot concatenate arrays with shapes that differ in dimensions "
           "other than the one being concatenated: dimension {} for shapes {}.")
    raise TypeError(msg.format(dimension, ", ".join(map(str, shapes))))
  concat_size = sum(o.shape[dimension] for o in operands)
  ex_shape = operands[0].shape
  return ex_shape[:dimension] + (concat_size,) + ex_shape[dimension+1:]
def _concatenate_dtype_rule(*operands, **kwargs):
  """All operands must share a dtype (no implicit promotion); returns it."""
  _check_same_dtypes('concatenate', False, *(o.dtype for o in operands))
  return operands[0].dtype
def _concatenate_translation_rule(c, *operands, **kwargs):
  """Lower to XLA ConcatInDim."""
  dimension = kwargs.pop('dimension')
  return xops.ConcatInDim(c, operands, dimension)
def _concatenate_transpose_rule(t, *operands, dimension):
  """Transpose of concatenate: slice the cotangent apart along `dimension`,
  one piece per operand (None for operands that are known primals)."""
  operand_shapes = [o.aval.shape if ad.is_undefined_primal(o) else o.shape
                    for o in operands]
  if t is ad_util.zero:
    return [ad_util.zero if ad.is_undefined_primal(o) else None for o in operands]
  else:
    # Cumulative extents along `dimension` give each operand's start/limit.
    limit_points = onp.cumsum([shape[dimension] for shape in operand_shapes])
    starts = onp.zeros((len(operands), t.ndim), dtype=int)
    starts[1:, dimension] = limit_points[:-1]
    limits = onp.tile(t.shape, (len(operands), 1))
    limits[:, dimension] = limit_points
    # `slice` here is lax.slice (defined elsewhere), not the builtin.
    return [slice(t, start, limit) if ad.is_undefined_primal(o) else None
            for o, start, limit in zip(operands, starts, limits)]
def _concatenate_batch_rule(batched_args, batch_dims, *, dimension):
  """Batch rule: move every batch dim to axis 0 (broadcasting operands that
  have none) and concatenate along dimension + 1."""
  size = next(op.shape[bdim] for op, bdim in zip(batched_args, batch_dims)
              if bdim is not None)
  operands = [batching.moveaxis(op, bdim, 0) if bdim is not None
              else broadcast(op, (size,))
              for op, bdim in zip(batched_args, batch_dims)]
  return concatenate(operands, dimension + 1), 0
# The concatenate_p masking rule requires use of a while-loop construct and so
# is defined in lax_control_flow.py
concatenate_p = standard_primitive(
    _concatenate_shape_rule, _concatenate_dtype_rule, 'concatenate',
    _concatenate_translation_rule)
# NOTE(review): both deflinear and the direct primitive_transposes assignment
# register a transpose; the direct assignment takes effect — presumably
# deflinear is kept for the JVP registration it also performs. Confirm.
ad.deflinear(concatenate_p, _concatenate_transpose_rule)
ad.primitive_transposes[concatenate_p] = _concatenate_transpose_rule
batching.primitive_batchers[concatenate_p] = _concatenate_batch_rule
def _pad_dtype_rule(operand, padding_value, *, padding_config):
  """Dtype rule for pad: the operand and padding value dtypes must agree."""
  if operand.dtype == padding_value.dtype:
    return _input_dtype(operand, padding_value)
  msg = "pad operand and padding_value must be same dtype: got {} and {}."
  raise TypeError(msg.format(operand.dtype, padding_value.dtype))
def _pad_shape_rule(operand, padding_value, *, padding_config):
lo, hi, interior = zip(*padding_config)
out_shape = onp.add(onp.add(onp.add(lo, hi), operand.shape),
onp.multiply(interior, onp.subtract(operand.shape, 1)))
return tuple(out_shape)
def _pad_transpose(t, operand, padding_value, *, padding_config):
  """Transpose rule for pad.

  The operand cotangent strips the padding from `t` (negative edge pads plus
  a strided slice across interior pads); the padding value's cotangent is the
  total mass of `t` minus the mass attributed to the operand.
  """
  if t is ad_util.zero:
    return [ad_util.zero if ad.is_undefined_primal(operand) else None,
            ad_util.zero if ad.is_undefined_primal(padding_value) else None]
  lo, hi, interior = zip(*padding_config)
  total = lambda x: _reduce_sum(x, list(range(t.ndim)))
  def t_op():
    # Undo edge padding with negative pads, then skip interior padding by
    # taking a strided slice (stride = interior + 1).
    unpad_config = zip(onp.negative(lo), onp.negative(hi), onp.zeros_like(interior))
    unpadded = pad(t, onp.array(0., t.dtype), unpad_config)
    return slice(unpadded, onp.zeros_like(lo), unpadded.shape, onp.add(interior, 1))
  t_operand = t_op() if ad.is_undefined_primal(operand) else None
  t_padv = sub(total(t), total(t_operand)) if ad.is_undefined_primal(padding_value) else None
  return [t_operand, t_padv]
def _pad_batch_rule(batched_args, batch_dims, *, padding_config):
  """Batching rule for pad: the batch dimension itself gets (0, 0, 0) padding."""
  operand, padding_value = batched_args
  operand_bdim, padding_value_bdim = batch_dims
  if padding_value_bdim is None:
    assert operand_bdim is not None
    padding_config = list(padding_config)
    padding_config.insert(operand_bdim, (0, 0, 0))
    return pad(operand, padding_value, padding_config), operand_bdim
  else:
    # A batched padding value would need one pad per batch element.
    raise NotImplementedError  # loop and stack
def _pad_translation_rule(c, operand, padding_value, *, padding_config):
  """XLA translation rule for pad: lowers directly to xops.Pad."""
  return xops.Pad(operand, padding_value,
                  xc.make_padding_config(padding_config))
# Primitive registration for pad.
pad_p = standard_primitive(_pad_shape_rule, _pad_dtype_rule, 'pad',
                           translation_rule=_pad_translation_rule)
ad.deflinear(pad_p, _pad_transpose)
# NOTE(review): overrides the transpose installed by deflinear — confirm intended.
ad.primitive_transposes[pad_p] = _pad_transpose
batching.primitive_batchers[pad_p] = _pad_batch_rule
# We have a nonstandard reshape impl so that we can be lazy about data movement.
def _reshape_impl(operand, *, new_sizes, dimensions):
  """Eager impl for reshape that avoids data movement when possible."""
  old_sizes = onp.shape(operand)
  if type(operand) is xla.DeviceArray and dimensions is None:
    # A reshape that only inserts unit dimensions is expressible as a lazy
    # broadcast, keeping the existing device buffer.
    bcast_dims = _is_singleton_reshape(old_sizes, new_sizes)
    if bcast_dims is not None:
      aval = ShapedArray(new_sizes, operand.dtype)
      lazy_expr = lazy.broadcast(operand._lazy_expr, new_sizes, bcast_dims)
      return xla.DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
  if type(operand) is pxla.ShardedDeviceArray and dimensions is None:
    # Splits/merges of the leading (sharded) axis can reuse the shard buffers.
    array = _reshape_sharded_device_array(operand, new_sizes, old_sizes)
    if array is not None:
      return array
  return xla.apply_primitive(reshape_p, operand, new_sizes=new_sizes,
                             dimensions=dimensions)
def _is_singleton_reshape(old, new):
# A singleton reshape is one where only singleton dimensions are added. We
# want to detect them because they can be expressed as (lazy) broadcasts.
old, new = iter(old), iter(new)
d1, d2 = next(old, None), next(new, None)
bcast_dims = []
i = 0
while True:
if d1 is d2 is None:
return bcast_dims
elif d1 == d2:
bcast_dims.append(i)
i += 1
d1, d2 = next(old, None), next(new, None)
elif d2 == 1:
i += 1
d2 = next(new, None)
else:
return None
def _reshape_sharded_device_array(array, new_sizes, old_sizes):
  """Returns None if `array` could not be efficiently reshaped.

  This function is primarily to support soft_pmap, although these optimizations
  could be useful when directly calling reshape as well.
  """
  # TODO(jekbradbury): the axis split/merge logic below assumes that
  # ShardedDevicesArrays are always sharded across their leading axes. Remove
  # this constraint, especially if/when we add APIs that produce sharding across
  # interior axes.
  if any(num_shards != 1 for num_shards
         in array.sharding_spec.shards_per_axis[1:]):
    return None
  # TODO(skye): handle replicated buffers
  if array.sharding_spec.replication_factor != 1:
    return None
  # ShardedDevicesArrays require all buffers to have the same shape
  chunk_shape = array.device_buffers[0].shape().dimensions()
  chunk_size = chunk_shape[0] if len(chunk_shape) > 0 else 1
  if _is_axis_merge(old_sizes, new_sizes):
    # Merging the two leading axes: the merged axis must divide evenly into
    # whole per-device chunks for the buffers to be reusable as-is.
    num_chunks, ragged = divmod(new_sizes[0], chunk_size)
    if ragged: return None
    aval = ShapedArray(new_sizes, array.dtype)
    sharding_spec = pxla.ShardingSpec(
        shards_per_axis=(num_chunks,) + (1,) * (len(new_sizes) - 1),
        is_axis_materialized=(True,) * len(new_sizes),
        replication_factor=1)
    return pxla.ShardedDeviceArray(aval, sharding_spec, array.device_buffers)
  if _is_axis_split(old_sizes, new_sizes):
    # Splitting the leading axis: only valid when each resulting row is
    # exactly one per-device chunk.
    split_axis_size, ragged = divmod(old_sizes[0], chunk_size)
    if ragged: return None
    if new_sizes[0] != split_axis_size: return None
    aval = ShapedArray(new_sizes, array.dtype)
    sharding_spec = pxla._pmap_sharding_spec(
        new_sizes[0], new_sizes[0], ShapedArray(new_sizes[1:], array.dtype), True)
    return pxla.ShardedDeviceArray(aval, sharding_spec, array.device_buffers)
  return None
def _is_axis_merge(s1, s2):
# TODO(skye): we might still be able to handle these cases as merges, I
# haven't thought about it much.
if len(s1) < 2 or len(s2) < 1: return False
return s1[2:] == s2[1:] and s1[0] * s1[1] == s2[0]
def _is_axis_split(s1, s2):
  # An axis split is exactly the inverse of an axis merge.
  return _is_axis_merge(s2, s1)
def _reshape_shape_rule(operand, *, new_sizes, dimensions):
if not onp.all(onp.greater_equal(new_sizes, 0)):
msg = 'reshape new_sizes must all be positive, got {}.'
raise TypeError(msg.format(new_sizes))
if prod(onp.shape(operand)) != prod(new_sizes):
msg = 'reshape total size must be unchanged, got new_sizes {} for shape {}.'
raise TypeError(msg.format(new_sizes, onp.shape(operand)))
if dimensions is not None:
if set(dimensions) != set(range(onp.ndim(operand))):
msg = ('reshape dimensions must be a permutation of operand dimensions, '
'got dimensions {} for shape {}.')
raise TypeError(msg.format(dimensions, onp.shape(operand)))
return tuple(new_sizes)
def _reshape_dtype_rule(operand, *, new_sizes, dimensions):
  # Reshape never changes the element type.
  return operand.dtype
def _reshape_translation_rule(c, operand, *, new_sizes, dimensions):
  """XLA translation for reshape; the 3-arg form fuses a transpose."""
  if dimensions is None:
    return xops.Reshape(operand, new_sizes)
  else:
    return xops.Reshape(operand, dimensions, new_sizes)
def _reshape_transpose_rule(t, operand, *, new_sizes, dimensions):
  """Transpose rule for reshape: reshape the cotangent back, undoing any
  fused dimension permutation with argsort."""
  assert ad.is_undefined_primal(operand)
  if dimensions is None:
    return [reshape(t, operand.aval.shape)]
  else:
    return [transpose(reshape(t, onp.take(operand.aval.shape, dimensions)),
                      onp.argsort(dimensions))]
def _reshape_batch_rule(batched_args, batch_dims, *, new_sizes, dimensions):
  """Batching rule for reshape: keep the batch dim in front, untouched."""
  operand, = batched_args
  bdim, = batch_dims
  operand = batching.moveaxis(operand, bdim, 0)
  if dimensions is not None:
    # Shift the permutation past the new leading batch dimension.
    dimensions = (0,) + tuple(onp.add(1, dimensions))
  return reshape(operand, operand.shape[:1] + new_sizes, dimensions), 0
# Primitive registration for reshape; a custom impl keeps reshapes lazy.
reshape_p = standard_primitive(_reshape_shape_rule, _reshape_dtype_rule,
                               'reshape', _reshape_translation_rule)
reshape_p.def_impl(_reshape_impl)
ad.deflinear2(reshape_p, _reshape_transpose_rule)
batching.primitive_batchers[reshape_p] = _reshape_batch_rule
def _rev_shape_rule(operand, *, dimensions):
  """Shape rule for rev: dimensions must be unique and in range; shape is
  unchanged."""
  _check_shapelike('rev', 'dimensions', dimensions)
  if len(set(dimensions)) != len(dimensions):
    msg = 'rev dimensions must be unique, got {}.'
    raise TypeError(msg.format(dimensions))
  if dimensions and not _max(dimensions) < operand.ndim:
    msg = ('rev dimensions must all be less than operand ndim, got dimensions '
           '{} for operand ndim {}.')
    raise TypeError(msg.format(dimensions, operand.ndim))
  return operand.shape
def _rev_batch_rule(batched_args, batch_dims, *, dimensions):
  """Batching rule for rev: shift reversed dims past the batch dim."""
  operand, = batched_args
  bdim, = batch_dims
  shifted = []
  for d in dimensions:
    # Dimensions at or after the batch dim move one slot to the right.
    shifted.append(d + 1 if d >= bdim else d)
  return rev(operand, shifted), bdim
# Primitive registration for rev; rev is its own (linear) transpose.
rev_p = standard_primitive(_rev_shape_rule, _input_dtype, 'rev')
ad.deflinear(rev_p, lambda t, dimensions: [rev(t, dimensions)])
batching.primitive_batchers[rev_p] = _rev_batch_rule
def _transpose_impl(operand, *, permutation):
  """Eager impl for transpose: stays lazy (no data movement) on DeviceArrays."""
  if type(operand) is xla.DeviceArray:
    lazy_expr = lazy.transpose(operand._lazy_expr, permutation)
    aval = ShapedArray(lazy_expr.shape, operand.dtype)
    return xla.DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
  else:
    return xla.apply_primitive(transpose_p, operand, permutation=permutation)
def _transpose_shape_rule(operand, *, permutation):
if not isinstance(permutation, (tuple, list, onp.ndarray)):
msg = "transpose permutation must be a tuple/list/ndarray, got {}."
raise TypeError(msg.format(type(permutation)))
if tuple(sorted(permutation)) != tuple(range(operand.ndim)):
msg = ("transpose permutation isn't a permutation of operand dimensions, "
"got permutation {} for operand shape {}.")
raise TypeError(msg.format(permutation, operand.shape))
return tuple(onp.take(operand.shape, permutation))
def _transpose_batch_rule(batched_args, batch_dims, *, permutation):
  """Batching rule for transpose: prepend the batch dim to the permutation."""
  operand, = batched_args
  bdim, = batch_dims
  # Renumber the original permutation to skip over the batch dimension.
  perm = (bdim,) + tuple(i if i < bdim else i+1 for i in permutation)
  return transpose(operand, perm), 0
# Primitive registration for transpose; its transpose applies the inverse
# permutation (argsort of the original).
transpose_p = standard_primitive(_transpose_shape_rule, _input_dtype,
                                 'transpose')
transpose_p.def_impl(_transpose_impl)
ad.deflinear(transpose_p,
             lambda t, permutation: [transpose(t, onp.argsort(permutation))])
batching.primitive_batchers[transpose_p] = _transpose_batch_rule
def _select_shape_rule(pred, on_true, on_false):
if on_true.shape != on_false.shape:
msg = "select on_true and on_false must have the same shape, got {} and {}."
raise TypeError(msg.format(on_true.shape, on_false.shape))
if pred.shape and pred.shape != on_true.shape:
msg = ("select pred must be scalar or have the same shape as on_true and "
"on_false, got pred shape {} for on_true and on_false of shape {}.")
raise TypeError(msg.format(pred.shape, on_true.shape))
return on_true.shape
def _select_dtype_rule(pred, on_true, on_false):
  """Dtype rule for select: branches share a dtype; pred must be boolean."""
  _check_same_dtypes("select", False, on_true.dtype, on_false.dtype)
  if not dtypes.issubdtype(pred.dtype, onp.bool_):
    msg = "select pred must be boolean type, got {}."
    raise TypeError(msg.format(pred.dtype))
  return on_true.dtype
def _select_transpose_rule(t, pred, on_true, on_false):
  """Transpose rule for select: route the cotangent to the branch that was
  taken, zeros to the other; pred itself gets no cotangent."""
  assert not ad.is_undefined_primal(pred)
  if t is ad_util.zero:
    return [None,
            ad_util.zero if ad.is_undefined_primal(on_true) else None,
            ad_util.zero if ad.is_undefined_primal(on_false) else None]
  else:
    zeros = full_like(t, 0)
    return [None,
            select(pred, t, zeros) if ad.is_undefined_primal(on_true) else None,
            select(pred, zeros, t) if ad.is_undefined_primal(on_false) else None]
def _select_batch_rule(batched_args, batch_dims, **unused_kwargs):
  """Batching rule for select.

  Fast paths avoid moveaxis/broadcast when the batch dims already line up;
  the general path moves every batch dim to the front and broadcasts the
  arguments into agreement.
  """
  pred, on_true, on_false, = batched_args
  pred_bdim, ot_bdim, of_bdim = batch_dims
  size = next(x.shape[i] for x, i in zip(batched_args, batch_dims)
              if i is not None)
  # avoid transposes and some broadcasts in special cases
  if pred_bdim == ot_bdim == of_bdim:
    if onp.shape(pred) == onp.shape(on_true):
      return select(pred, on_true, on_false), pred_bdim
    else:
      # vmapped function had a scalar pred with nonscalar args
      assert onp.ndim(pred) == 1
      pred = broadcast_in_dim(pred, on_true.shape, [pred_bdim])
      return select(pred, on_true, on_false), pred_bdim
  elif onp.ndim(pred) == 0 and ot_bdim is not None and of_bdim is not None:
    if ot_bdim == of_bdim:
      return select(pred, on_true, on_false), ot_bdim
    elif onp.shape(on_true) == onp.shape(on_false):
      on_false = batching.moveaxis(on_false, of_bdim, ot_bdim)
      return select(pred, on_true, on_false), ot_bdim
  # General case: align all batch dims at the front, then broadcast shapes.
  pred = batching.bdim_at_front(pred, pred_bdim, size) if onp.shape(pred) else pred
  if not onp.shape(on_true) == onp.shape(on_false) == ():
    on_true = batching.bdim_at_front(on_true, ot_bdim, size)
    on_false = batching.bdim_at_front(on_false, of_bdim, size)
  assert onp.shape(on_true) == onp.shape(on_false)
  if 0 < onp.ndim(pred) < onp.ndim(on_true):
    # vmapped function had a scalar pred with nonscalar args
    assert onp.ndim(pred) == 1
    pred = broadcast_in_dim(pred, on_true.shape, [0])
  if onp.ndim(pred) > onp.ndim(on_true):
    assert onp.ndim(on_true) == 0
    on_true = broadcast(on_true, pred.shape)
    on_false = broadcast(on_false, pred.shape)
  return select(pred, on_true, on_false), 0
# Primitive registration for select; JVPs route tangents through the chosen
# branch, and pred contributes no tangent (None).
select_p = standard_primitive(_select_shape_rule, _select_dtype_rule, 'select')
ad.defjvp(select_p,
          None,
          lambda g, b, x, y: select(b, g, _zeros(g)),
          lambda g, b, x, y: select(b, _zeros(g), g))
ad.primitive_transposes[select_p] = _select_transpose_rule
batching.primitive_batchers[select_p] = _select_batch_rule
def _slice_shape_rule(operand, *, start_indices, limit_indices, strides):
  """Shape rule for (static) slice.

  Validates indices against the operand shape and returns the sliced shape,
  ceil((limit - start) / stride) per dimension.
  """
  _check_shapelike("slice", "start_indices", start_indices)
  _check_shapelike("slice", "limit_indices", limit_indices)
  if operand.ndim != len(start_indices):
    msg = ("slice start_indices must have length equal to the number of "
           "dimensions of the operand, got indices {} for operand shape {}.")
    raise TypeError(msg.format(start_indices, operand.shape))
  if len(start_indices) != len(limit_indices):
    # Typo fix: the message previously read "start_inidices".
    msg = ("slice limit_indices must have the same length as start_indices, "
           "got start_indices {} and limit_indices {}.")
    raise TypeError(msg.format(start_indices, limit_indices))
  if not onp.all(onp.less_equal(limit_indices, operand.shape)):
    msg = ("slice limit_indices must be less than or equal to operand shape, "
           "got limit_indices {} for operand shape {}.")
    raise TypeError(msg.format(limit_indices, operand.shape))
  if not onp.all(onp.greater_equal(start_indices, 0)):
    msg = ("slice start_indices must be greater than or equal to zero, "
           "got start_indices of {}.")
    raise TypeError(msg.format(start_indices))
  if not onp.all(onp.greater_equal(limit_indices, start_indices)):
    msg = ("slice limit_indices must be greater than or equal to start_indices,"
           " got start_indices {} and limit_indices {}.")
    raise TypeError(msg.format(start_indices, limit_indices))
  if strides is None:
    strides = onp.ones(operand.ndim, onp.int32)
  else:
    _check_shapelike("slice", "strides", strides)
    if len(strides) != operand.ndim:
      msg = ("slice strides must have length equal to the number of dimensions "
             "of the operand, got strides {} for operand shape {}.")
      raise TypeError(msg.format(strides, operand.shape))
    if not onp.all(onp.greater(strides, 0)):
      msg = "slice strides must be positive, got {}"
      raise TypeError(msg.format(strides))
  # Per-dimension output size: ceil((limit - start) / stride).
  result_shape = onp.floor_divide(
      onp.add(onp.subtract(limit_indices, start_indices), strides) - 1, strides)
  return tuple(result_shape)
def _slice_translation_rule(c, operand, *, start_indices, limit_indices,
                            strides):
  """XLA translation for slice; None strides lower as all-ones strides."""
  return xops.Slice(operand, start_indices, limit_indices,
                    strides or [1] * len(start_indices))
def _slice_transpose_rule(t, operand, *, start_indices, limit_indices, strides):
  """Transpose rule for slice: pad the cotangent back to the operand shape,
  with interior padding equal to stride - 1 for strided slices."""
  assert ad.is_undefined_primal(operand)
  operand_shape = operand.aval.shape
  if strides is None or onp.all(onp.equal(strides, 1)):
    pads = zip(start_indices, onp.subtract(operand_shape, limit_indices),
               (0,) * len(start_indices))
  else:
    # Last element actually read is start + (t.shape - 1) * stride; pad out
    # from there to the original limit.
    real_limits = onp.add(onp.add(start_indices, 1),
                          onp.multiply(onp.subtract(t.shape, 1), strides))
    pads = zip(start_indices, onp.subtract(operand_shape, real_limits),
               onp.subtract(strides, 1))
  result = pad(t, _const(t, 0), pads)
  assert result.shape == operand_shape
  return [result]
def _slice_batching_rule(batched_args, batch_dims, *, start_indices,
                         limit_indices, strides):
  """Batching rule for slice: take the full extent along the batch dim."""
  operand, = batched_args
  bdim, = batch_dims
  # Splice a full-span entry for the batch dimension into each index list.
  starts = list(start_indices[:bdim]) + [0] + list(start_indices[bdim:])
  limits = (list(limit_indices[:bdim]) + [operand.shape[bdim]]
            + list(limit_indices[bdim:]))
  if strides is None:
    batched_strides = None
  else:
    batched_strides = list(strides[:bdim]) + [1] + list(strides[bdim:])
  return slice(operand, starts, limits, batched_strides), bdim
# Primitive registration for slice.
slice_p = standard_primitive(_slice_shape_rule, _input_dtype, 'slice',
                             _slice_translation_rule)
ad.deflinear2(slice_p, _slice_transpose_rule)
batching.primitive_batchers[slice_p] = _slice_batching_rule
def _dynamic_slice_shape_rule(operand, *start_indices, slice_sizes):
if operand.ndim != len(start_indices):
msg = ("dynamic_slice start_indices must have length equal to the number "
"of dimensions of the operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if len(start_indices) != len(slice_sizes):
msg = ("dynamic_slice slice_sizes must have the same length as "
"start_indices, got start_inidices length {} and slice_sizes {}.")
raise TypeError(msg.format(len(start_indices), slice_sizes))
if not onp.all(onp.less_equal(slice_sizes, operand.shape)):
msg = ("slice slice_sizes must be less than or equal to operand shape, "
"got slice_sizes {} for operand shape {}.")
raise TypeError(msg.format(slice_sizes, operand.shape))
if not onp.all(onp.greater_equal(slice_sizes, 0)):
msg = ("slice slice_sizes must be greater than or equal to zero, "
"got slice_sizes of {}.")
raise TypeError(msg.format(slice_sizes))
return tuple(slice_sizes)
def _dynamic_slice_dtype_rule(operand, *start_indices, slice_sizes):
  """Dtype rule: all start indices must be integers of one common type."""
  if any(i.dtype != start_indices[0].dtype or
         not dtypes.issubdtype(i.dtype, onp.integer) for i in start_indices):
    msg = ("index arguments to dynamic_slice must be integers of the same "
           "type, got: {}")
    raise TypeError(msg.format(", ".join(i.dtype.name for i in start_indices)))
  return operand.dtype
def _dynamic_slice_translation_rule(c, operand, *start_indices, slice_sizes):
  """XLA translation for dynamic_slice: lowers directly to DynamicSlice."""
  return xops.DynamicSlice(operand, start_indices, slice_sizes)
def _dynamic_slice_jvp(primals, tangents, *, slice_sizes):
  """JVP rule: slice the operand tangent at the same (primal) indices."""
  tangent_out = ad_util.zero
  if tangents[0] is not ad_util.zero:
    tangent_out = dynamic_slice(tangents[0], primals[1:], slice_sizes)
  return dynamic_slice(primals[0], primals[1:], slice_sizes), tangent_out
def _dynamic_slice_transpose_rule(t, operand, *start_indices, slice_sizes):
  """Transpose rule: scatter the cotangent into zeros at the slice position."""
  assert ad.is_undefined_primal(operand)
  assert all(not ad.is_undefined_primal(s) for s in start_indices)
  operand_shape = operand.aval.shape
  zeros = full(operand_shape, tie_in(t, _zero(t)))
  # Indices are integer-valued, so they receive no cotangent (None).
  return ([dynamic_update_slice(zeros, t, start_indices)] +
          [None] * len(start_indices))
def _batch_dynamic_slice_indices(indices, bdims):
  """Stack per-dimension scalar indices into a single gather-style index array.

  Returns (indices, bdim): an unbatched 1-D index vector with bdim None when
  no index is batched, otherwise a (size, n) array batched along axis 0.
  """
  size = next((x.shape[i] for x, i in zip(indices, bdims) if i is not None), -1)
  if size < 0:
    return concatenate([reshape(i, [1]) for i in indices], 0), None
  # Broadcast unbatched indices across the batch before joining along axis 1.
  indices = concatenate(
    [broadcast_in_dim(x, (size, 1),
                      broadcast_dimensions=((0,) if i is not None else ()))
     for x, i in zip(indices, bdims)],
    dimension=1)
  return indices, 0
def _dynamic_slice_batching_rule(batched_args, batch_dims, *, slice_sizes):
  # A dynamic slice is a special case of gather; we can delegate to the gather
  # batching rule.
  # TODO(phawkins): consider removing dynamic_slice entirely and using gather
  # always.
  operand, *start_indices = batched_args
  operand_bd, *start_idx_bds = batch_dims
  # Shape of the operand with its batch dim (if any) removed.
  operand_shape = (operand.shape if operand_bd is batching.not_mapped
                   else tuple(onp.delete(operand.shape, operand_bd)))
  dims = tuple(range(len(operand_shape)))
  dnums = GatherDimensionNumbers(offset_dims=dims, collapsed_slice_dims=(),
                                 start_index_map=dims)
  index, index_bdim = _batch_dynamic_slice_indices(start_indices, start_idx_bds)
  return _gather_batching_rule(
    [operand, index], [operand_bd, index_bdim], dimension_numbers=dnums,
    slice_sizes=slice_sizes)
# Primitive registration for dynamic_slice.
dynamic_slice_p = standard_primitive(
    _dynamic_slice_shape_rule, _dynamic_slice_dtype_rule, 'dynamic_slice',
    _dynamic_slice_translation_rule)
ad.primitive_jvps[dynamic_slice_p] = _dynamic_slice_jvp  # TODO
ad.primitive_transposes[dynamic_slice_p] = _dynamic_slice_transpose_rule
batching.primitive_batchers[dynamic_slice_p] = _dynamic_slice_batching_rule
def _dynamic_update_slice_shape_rule(operand, update, *start_indices):
if operand.ndim != update.ndim:
msg = ("dynamic_update_slice update must have the same rank as operand, "
"got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
if operand.ndim != len(start_indices):
msg = ("dynamic_update_slice start_indices must have length equal to the "
"rank of operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if not onp.all(onp.less_equal(update.shape, operand.shape)):
msg = ("dynamic_update_slice update shape must be smaller than operand "
"shape, got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
return operand.shape
def _dynamic_update_slice_dtype_rule(operand, update, *start_indices):
  """Dtype rule: operand/update dtypes must agree; indices must be integers
  of one common type."""
  _check_same_dtypes("dynamic_update_slice", False, operand.dtype, update.dtype)
  if any(i.dtype != start_indices[0].dtype or
         not dtypes.issubdtype(i.dtype, onp.integer) for i in start_indices):
    msg = ("index arguments to dynamic_update_slice must be integers of the "
           "same type, got {}")
    raise TypeError(msg.format(", ".join(i.dtype.name for i in start_indices)))
  return operand.dtype
def _dynamic_update_slice_jvp(primals, tangents):
  """JVP rule: update the operand tangent with the update tangent at the
  same (primal) indices."""
  operand, update = primals[:2]
  start_indices = primals[2:]
  g_operand, g_update = tangents[:2]
  val_out = dynamic_update_slice(operand, update, start_indices)
  if g_operand is ad_util.zero and g_update is ad_util.zero:
    tangent_out = ad_util.zero
  else:
    # Materialize symbolic zeros so both tangents are concrete arrays.
    g_operand = ad.instantiate_zeros(operand, g_operand)
    g_update = ad.instantiate_zeros(update, g_update)
    tangent_out = dynamic_update_slice(g_operand, g_update, start_indices)
  return val_out, tangent_out
def _dynamic_update_slice_transpose_rule(t, operand, update, *start_indices):
  """Transpose rule: the operand cotangent is `t` with the updated window
  zeroed out; the update cotangent is the window sliced out of `t`."""
  assert all(not ad.is_undefined_primal(x) for x in start_indices)
  if ad.is_undefined_primal(update):
    update_shape = update.aval.shape
  else:
    update_shape = update.shape
  dus = dynamic_update_slice
  ds = dynamic_slice
  zeros = _zeros(t, shape=update_shape)
  operand_t = dus(t, zeros, start_indices) if ad.is_undefined_primal(operand) else None
  update_t = ds(t, start_indices, update_shape) if ad.is_undefined_primal(update) else None
  return [operand_t, update_t] + [None] * len(start_indices)
def _dynamic_update_slice_translation_rule(c, operand, update, *start_indices):
  """XLA translation: lowers directly to DynamicUpdateSlice."""
  return xops.DynamicUpdateSlice(operand, update, start_indices)
def _dynamic_update_slice_batching_rule(batched_args, batch_dims):
  # A dynamic update slice is a special case of scatter; we can delegate to the
  # scatter batching rule.
  # TODO(phawkins): consider removing dynamic_update_slice entirely and using
  # scatter always.
  operand, update, *start_idx = batched_args
  operand_bd, update_bd, *start_idx_bd = batch_dims
  # Shape of the update with its batch dim (if any) removed.
  update_shape = (update.shape if update_bd is batching.not_mapped
                  else tuple(onp.delete(update.shape, update_bd)))
  dims = tuple(range(len(update_shape)))
  dnums = ScatterDimensionNumbers(update_window_dims=dims,
                                  inserted_window_dims=(),
                                  scatter_dims_to_operand_dims=dims)
  index, index_bdim = _batch_dynamic_slice_indices(start_idx, start_idx_bd)
  return _scatter_batching_rule(
    scatter, (operand, index, update), (operand_bd, index_bdim, update_bd),
    update_jaxpr=None, update_consts=None, dimension_numbers=dnums)
# Primitive registration for dynamic_update_slice.
dynamic_update_slice_p = standard_primitive(
    _dynamic_update_slice_shape_rule, _dynamic_update_slice_dtype_rule,
    'dynamic_update_slice', _dynamic_update_slice_translation_rule)
ad.primitive_jvps[dynamic_update_slice_p] = _dynamic_update_slice_jvp
ad.primitive_transposes[dynamic_update_slice_p] = \
    _dynamic_update_slice_transpose_rule
batching.primitive_batchers[dynamic_update_slice_p] = \
    _dynamic_update_slice_batching_rule
def _gather_dimensions_proto(indices_shape, dimension_numbers):
  """Convert GatherDimensionNumbers to the XLA protobuf form.

  The trailing axis of `start_indices` is the index vector dimension.
  """
  assert type(dimension_numbers) is GatherDimensionNumbers
  proto = xla_client.GatherDimensionNumbers()
  proto.offset_dims.extend(dimension_numbers.offset_dims)
  proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
  proto.start_index_map.extend(dimension_numbers.start_index_map)
  assert indices_shape.rank() > 0
  proto.index_vector_dim = indices_shape.rank() - 1
  return proto
def _gather_dtype_rule(operand, start_indices, **kwargs):
  """Dtype rule for gather: indices must be integral; output follows operand."""
  if not dtypes.issubdtype(start_indices.dtype, onp.integer):
    raise ValueError("start_indices must have an integer type")
  return dtypes.canonicalize_dtype(operand.dtype)
def _gather_shape_rule(operand, start_indices, *, dimension_numbers,
slice_sizes):
if len(operand.shape) != len(slice_sizes):
msg = ("slice_sizes must have rank equal to the gather operand; "
"operand.shape={}, slice_sizes={}".format(operand.shape, slice_sizes))
raise ValueError(msg)
result_rank = len(dimension_numbers.offset_dims) + start_indices.ndim - 1
start_indices_shape = iter(start_indices.shape[:-1])
slice_sizes = iter(onp.delete(slice_sizes, dimension_numbers.collapsed_slice_dims))
return tuple(next(slice_sizes) if i in dimension_numbers.offset_dims
else next(start_indices_shape) for i in range(result_rank))
def _gather_translation_rule(c, operand, start_indices, *, dimension_numbers,
                             slice_sizes):
  """XLA translation for gather: lowers to xops.Gather."""
  indices_shape = c.get_shape(start_indices)
  return xops.Gather(
    operand, start_indices,
    _gather_dimensions_proto(indices_shape, dimension_numbers), slice_sizes,
    indices_are_sorted=False)
def _gather_jvp_rule(g, operand, start_indices, *, dimension_numbers,
                     slice_sizes):
  # Gather is linear in the operand: gather the tangent at the same indices.
  return gather(g, start_indices, dimension_numbers, slice_sizes)
def _gather_transpose_rule(t, operand, start_indices, *, dimension_numbers,
                           slice_sizes):
  """Transpose rule for gather: scatter-add the cotangent back into zeros.

  scatter_add (not scatter) handles repeated indices by accumulation.
  """
  assert ad.is_undefined_primal(operand)
  operand_shape = operand.aval.shape
  if t is ad_util.zero:
    return [ad_util.zero, ad_util.zero]
  zeros = full(operand_shape, tie_in(t, _zero(t)))
  # Gather and scatter dimension numbers are duals of one another.
  scatter_dnums = ScatterDimensionNumbers(
    update_window_dims=dimension_numbers.offset_dims,
    inserted_window_dims=dimension_numbers.collapsed_slice_dims,
    scatter_dims_to_operand_dims=dimension_numbers.start_index_map)
  return [scatter_add(zeros, start_indices, t, scatter_dnums), ad_util.zero]
def _gather_batching_rule(batched_args, batch_dims, *, dimension_numbers,
                          slice_sizes):
  """Batching rule for gather.

  Three cases: only the operand is batched (widen the slices to cover the
  batch axis), only the indices are batched (extra indices batch dims are
  free), or both (prepend an iota index so each batch element gathers from
  its own operand slice).
  """
  operand, start_indices = batched_args
  operand_bdim, start_indices_bdim = batch_dims
  if operand_bdim is not None and start_indices_bdim is None:
    operand = batching.moveaxis(operand, operand_bdim, 0)
    # Take the whole batch axis as part of every slice.
    slice_sizes = (operand.shape[0],) + slice_sizes
    offset_dims = (0,) + tuple(onp.add(1, dimension_numbers.offset_dims))
    collapsed_slice_dims = tuple(onp.add(1, dimension_numbers.collapsed_slice_dims))
    start_index_map = tuple(onp.add(1, dimension_numbers.start_index_map))
    dnums = GatherDimensionNumbers(
        offset_dims=offset_dims,
        collapsed_slice_dims=collapsed_slice_dims,
        start_index_map=start_index_map)
    return gather(operand, start_indices, dimension_numbers=dnums,
                  slice_sizes=slice_sizes), 0
  elif operand_bdim is None and start_indices_bdim is not None:
    start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)
    # Leading indices dims are implicit gather batch dims; just shift offsets.
    offset_dims = tuple(onp.add(1, dimension_numbers.offset_dims))
    dnums = GatherDimensionNumbers(
        offset_dims=offset_dims,
        collapsed_slice_dims=dimension_numbers.collapsed_slice_dims,
        start_index_map=dimension_numbers.start_index_map)
    return gather(operand, start_indices, dimension_numbers=dnums,
                  slice_sizes=slice_sizes), 0
  else:
    # move our batch dimensions to the front to preserve sanity
    operand = batching.moveaxis(operand, operand_bdim, 0)
    start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)
    # Example: user code had start_indices shape (3, 4, 5), and we have to deal
    # with start_indices shape (7, 3, 4, 5). We transform that to a
    # start_indices of shape (7, 3, 4, 6) where we concatenated an iota that
    # counts along our batch dimension to the front of the ndindex.
    count_shape = list(start_indices.shape)
    count_shape[-1] = 1
    counts = broadcasted_iota(start_indices.dtype, tuple(count_shape), 0)
    start_indices = concatenate([counts, start_indices], len(count_shape) - 1)
    slice_sizes = (1,) + slice_sizes
    collapsed_slice_dims = (0,) + tuple(onp.add(1, dimension_numbers.collapsed_slice_dims))
    offset_dims = tuple(onp.add(1, dimension_numbers.offset_dims))
    start_index_map = (0,) + tuple(onp.add(1, dimension_numbers.start_index_map))
    dnums = GatherDimensionNumbers(
        offset_dims=offset_dims,
        collapsed_slice_dims=collapsed_slice_dims,
        start_index_map=start_index_map)
    return gather(operand, start_indices, dimension_numbers=dnums,
                  slice_sizes=slice_sizes), 0
# Primitive registration for gather; indices get no tangent (None in defjvp).
gather_p = standard_primitive(
    _gather_shape_rule, _gather_dtype_rule, 'gather',
    _gather_translation_rule)
ad.defjvp(gather_p, _gather_jvp_rule, None)
ad.primitive_transposes[gather_p] = _gather_transpose_rule
batching.primitive_batchers[gather_p] = _gather_batching_rule
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
  """Convert ScatterDimensionNumbers to the XLA protobuf form.

  The trailing axis of `scatter_indices` is the index vector dimension.
  """
  assert type(dimension_numbers) is ScatterDimensionNumbers
  proto = xla_client.ScatterDimensionNumbers()
  proto.update_window_dims.extend(dimension_numbers.update_window_dims)
  proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
  proto.scatter_dims_to_operand_dims.extend(
    dimension_numbers.scatter_dims_to_operand_dims)
  assert indices_shape.rank() > 0
  proto.index_vector_dim = indices_shape.rank() - 1
  return proto
def _scatter_dtype_rule(operand, scatter_indices, updates, **kwargs):
  """Dtype rule for scatter: integral indices; operand/updates dtypes agree."""
  if not dtypes.issubdtype(scatter_indices.dtype, onp.integer):
    raise ValueError("scatter_indices must have an integer type")
  _check_same_dtypes("scatter", False, operand.dtype, updates.dtype)
  return dtypes.canonicalize_dtype(operand.dtype)
def _scatter_shape_rule(operand, scatter_indices, updates, **kwargs):
  # Scatter writes into the operand, so the output shape is the operand's.
  return operand.shape
def _scatter_translation_rule(c, operand, scatter_indices, updates,
                              update_jaxpr, update_consts, dimension_numbers):
  """XLA translation for scatter: builds the combiner computation from the
  update jaxpr and lowers to xops.Scatter."""
  dtype = c.get_shape(operand).numpy_dtype()
  init_value = xb.constant(c, onp.array(0, dtype))
  update_computation = _reduction_computation(
      c, update_jaxpr, update_consts, init_value)
  indices_shape = c.get_shape(scatter_indices)
  return xops.Scatter(operand, scatter_indices, updates, update_computation,
                      _scatter_dimensions_proto(indices_shape, dimension_numbers),
                      False, False)
def _scatter_add_jvp(primals, tangents, *, update_jaxpr, update_consts,
                     dimension_numbers):
  """JVP rule for scatter_add: scatter-add the tangents at the same indices
  (scatter_add is linear in operand and updates)."""
  operand, scatter_indices, updates = primals
  g_operand, g_scatter_indices, g_updates = tangents
  val_out = scatter_add_p.bind(
      operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
      update_consts=update_consts, dimension_numbers=dimension_numbers)
  if g_operand is ad_util.zero and g_updates is ad_util.zero:
    tangent_out = ad_util.zero
  else:
    # Materialize symbolic zeros so both tangents are concrete arrays.
    g_operand = ad.instantiate_zeros(operand, g_operand)
    g_updates = ad.instantiate_zeros(updates, g_updates)
    tangent_out = scatter_add_p.bind(
        g_operand, scatter_indices, g_updates, update_jaxpr=update_jaxpr,
        update_consts=update_consts, dimension_numbers=dimension_numbers)
  return val_out, tangent_out
def _scatter_add_transpose_rule(t, operand, scatter_indices, updates, *,
                                update_jaxpr, update_consts, dimension_numbers):
  """Transpose rule for scatter_add.

  The operand cotangent is `t` itself (addition passes it through); the
  updates cotangent is `t` gathered at the scatter positions.
  """
  assert not ad.is_undefined_primal(scatter_indices)
  if ad.is_undefined_primal(updates):
    updates_shape = updates.aval.shape
  else:
    updates_shape = updates.shape
  if t is ad_util.zero:
    return [ad_util.zero, None, ad_util.zero]
  operand_t = update_t = None
  if ad.is_undefined_primal(operand):
    operand_t = t
  if ad.is_undefined_primal(updates):
    # Dual gather dimension numbers for the scatter.
    gather_dnums = GatherDimensionNumbers(
      offset_dims=dimension_numbers.update_window_dims,
      collapsed_slice_dims=dimension_numbers.inserted_window_dims,
      start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
    slice_sizes = []
    pos = 0
    # Reconstruct per-dimension slice sizes: 1 for inserted window dims,
    # otherwise the corresponding update window extent.
    for i in range(len(t.shape)):
      if i in dimension_numbers.inserted_window_dims:
        slice_sizes.append(1)
      else:
        slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
        pos += 1
    update_t = gather(t, scatter_indices, dimension_numbers=gather_dnums,
                      slice_sizes=slice_sizes)
  return [operand_t, None, update_t]
def _scatter_mul_transpose_rule(t, operand, scatter_indices, updates, *,
                                update_jaxpr, update_consts, dimension_numbers):
  """Transpose rule for scatter_mul.

  Product rule: the operand cotangent multiplies `t` by the updates at the
  scattered positions; the updates cotangent gathers t * operand.
  """
  assert not ad.is_undefined_primal(scatter_indices)
  if ad.is_undefined_primal(updates):
    updates_shape = updates.aval.shape
  else:
    updates_shape = updates.shape
  if t is ad_util.zero:
    return [ad_util.zero, None, ad_util.zero]
  operand_t = update_t = None
  if ad.is_undefined_primal(operand):
    operand_t = scatter_mul(t, scatter_indices, updates,
                            dimension_numbers=dimension_numbers)
  if ad.is_undefined_primal(updates):
    # Dual gather dimension numbers for the scatter.
    gather_dnums = GatherDimensionNumbers(
      offset_dims=dimension_numbers.update_window_dims,
      collapsed_slice_dims=dimension_numbers.inserted_window_dims,
      start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
    slice_sizes = []
    pos = 0
    # 1 for inserted window dims, otherwise the update window extent.
    for i in range(len(t.shape)):
      if i in dimension_numbers.inserted_window_dims:
        slice_sizes.append(1)
      else:
        slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
        pos += 1
    update_t = gather(mul(t, operand), scatter_indices,
                      dimension_numbers=gather_dnums, slice_sizes=slice_sizes)
  return [operand_t, None, update_t]
def _scatter_batching_rule(scatter_op, batched_args, batch_dims, *,
                           update_jaxpr, update_consts, dimension_numbers):
  """Batching rule shared by the scatter variants.

  `scatter_op` is the user-facing wrapper (e.g. `scatter_add`) to rebind with
  adjusted dimension numbers.
  """
  operand, scatter_indices, updates = batched_args
  operand_bdim, scatter_indices_bdim, updates_bdim = batch_dims
  del update_jaxpr, update_consts  # Unused.
  # move the operand batch dim to the front if it is not None, otherwise create
  # it at the front (so that we can scatter into it)
  size = next(x.shape[ax] for x, ax in zip(batched_args, batch_dims)
              if ax is not None)
  operand = batching.bdim_at_front(operand, operand_bdim, size)
  operand_bdim = 0
  updates = batching.bdim_at_front(updates, updates_bdim, size)
  if scatter_indices_bdim is None:
    # Indices shared across the batch: shift every dimension number past the
    # new leading batch axis, which is treated as an update window dimension.
    inserted_window_dims = tuple(onp.add(1, dimension_numbers.inserted_window_dims))
    update_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.update_window_dims))
    scatter_dims_to_operand_dims = tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims))
    dnums = ScatterDimensionNumbers(
        update_window_dims=update_window_dims,
        inserted_window_dims=inserted_window_dims,
        scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
    return scatter_op(operand, scatter_indices, updates, dnums), 0
  # see the third case in _gather_batching_rule for comparison and comments
  # Batched indices: prepend an iota "batch index" column so that each batch
  # element scatters into its own slice of the leading axis.
  scatter_indices = batching.bdim_at_front(
      scatter_indices, scatter_indices_bdim, size)
  count_shape = list(scatter_indices.shape)
  count_shape[-1] = 1
  counts = broadcasted_iota(scatter_indices.dtype, tuple(count_shape), 0)
  scatter_indices = concatenate([counts, scatter_indices],
                                len(count_shape) - 1)
  update_window_dims = tuple(onp.add(1, dimension_numbers.update_window_dims))
  inserted_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.inserted_window_dims))
  scatter_dims_to_operand_dims = (0,) + tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims))
  dnums = ScatterDimensionNumbers(
      update_window_dims=update_window_dims,
      inserted_window_dims=inserted_window_dims,
      scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
  return scatter_op(operand, scatter_indices, updates, dnums), 0
# scatter-add primitive: shape/dtype/translation plus JVP, transpose, and
# batching registrations.
scatter_add_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter-add',
    _scatter_translation_rule)
ad.primitive_jvps[scatter_add_p] = _scatter_add_jvp
ad.primitive_transposes[scatter_add_p] = _scatter_add_transpose_rule
batching.primitive_batchers[scatter_add_p] = (
    partial(_scatter_batching_rule, scatter_add))
# scatter-mul primitive; its autodiff rules are registered just below.
scatter_mul_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter-mul',
    _scatter_translation_rule)
def _scatter_mul_jvp_rhs(g, x, i, y, *, dimension_numbers, **kw):
  """JVP of scatter-mul w.r.t. the updates.

  Scatter the updates-tangent `g` onto zeros, then multiply elementwise by
  the operand `x`: locations not touched by the scatter get zero tangent.
  """
  scattered_tangent = scatter_add(zeros_like_array(x), i, g,
                                  dimension_numbers=dimension_numbers)
  return mul(x, scattered_tangent)
# JVP rules for scatter-mul, positionally for (operand, indices, updates):
# tangent w.r.t. the operand rebinds scatter-mul on the tangent; indices are
# not differentiable; the updates tangent is handled by _scatter_mul_jvp_rhs.
ad.defjvp(scatter_mul_p,
          lambda g, x, i, y, **kw: scatter_mul_p.bind(g, i, y, **kw),
          None,
          _scatter_mul_jvp_rhs)
ad.primitive_transposes[scatter_mul_p] = _scatter_mul_transpose_rule
batching.primitive_batchers[scatter_mul_p] = (
    partial(_scatter_batching_rule, scatter_mul))
# TODO(jlebar): Add derivatives.
# scatter-min/max only have batching rules registered; no autodiff yet.
scatter_min_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter-min',
    _scatter_translation_rule)
batching.primitive_batchers[scatter_min_p] = (
    partial(_scatter_batching_rule, scatter_min))
# TODO(jlebar): Add derivatives.
scatter_max_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter-max',
    _scatter_translation_rule)
batching.primitive_batchers[scatter_max_p] = (
    partial(_scatter_batching_rule, scatter_max))
def _scatter_jvp(primals, tangents, *, update_jaxpr, update_consts,
                 dimension_numbers):
  """JVP rule for the general (opaque update computation) scatter primitive.

  Since overlapping scatter indices leave the winning update unspecified,
  the tangent is computed by tagging every update with a unique positive id,
  re-running the scatter on (value, id) pairs, and using the scattered ids to
  mask the operand/update tangents; see the lettered step comments below.
  """
  operand, scatter_indices, updates = primals
  g_operand, g_scatter_indices, g_updates = tangents
  dnums = dimension_numbers
  if g_operand is ad_util.zero and g_updates is ad_util.zero:
    # No differentiable inputs carry a tangent: the output tangent is zero.
    val_out = scatter_p.bind(
        operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
        update_consts=update_consts, dimension_numbers=dnums)
    tangent_out = ad_util.zero
    return val_out, tangent_out
  g_operand = ad.instantiate_zeros(operand, g_operand)
  g_updates = ad.instantiate_zeros(updates, g_updates)
  # If there are overlapping indices in the scatter, it is unspecified which
  # update "wins". So we use the following perhaps surprising scheme:
  # a) attach a positive ID to each update in updates, forming (value, id) pairs
  #    (using a new array dimension because scatter doesn't actually support
  #    pairs).
  # b) perform the scatter, yielding (value, id) updates, which we split apart.
  # c) perform the inverse gather on the ids (similar to
  #    _scatter_add_transpose), and use it to build a mask for the tangent of
  #    `updates`.
  # d) perform a scatter-add on the masked JVP values. A benefit of using
  #    scatter-add here is that we don't need a `scatter` transpose rule.
  # a) add unique positive IDs (iotas) to the updates, and zeros to the operand.
  operand_shape = operand.shape
  updates_shape = updates.shape
  updates_dtype = _dtype(updates)
  new_operand = reshape(operand, (1,) + operand_shape)
  new_operand = pad(new_operand, _zero(operand),
                    ((0, 1, 0),) + tuple((0, 0, 0) for _ in operand_shape))
  # We specify the dtype here in case `updates_shape` is an empty tuple, in
  # which case numpy defaults to float64.
  ids_shape = onp.array(updates_shape, dtype=onp.int32)
  ids_shape[dnums.update_window_dims,] = 1
  num_ids = onp.prod(ids_shape)
  update_ids = add(reshape(iota(updates_dtype, num_ids), ids_shape),
                   _ones(updates))
  # TODO(phawkins): there is a potential bug here if the number of updates
  # is large enough to overflow the number of mantissa bits in a float so IDs
  # end up colliding. We could also utilize the exponent and sign bits, with a
  # little more work.
  assert num_ids < (2 ** dtypes.finfo(updates_dtype).nmant)
  updates = reshape(updates, (1,) + updates_shape)
  reshaped_update_ids = reshape(update_ids, (1,) + updates_shape)
  updates_and_ids = concatenate((updates, reshaped_update_ids), 0)
  # Shift all dimension numbers past the new leading pair axis.
  new_dnums = ScatterDimensionNumbers(
      update_window_dims=(0,) + tuple(d + 1 for d in dnums.update_window_dims),
      inserted_window_dims=tuple(d + 1 for d in dnums.inserted_window_dims),
      scatter_dims_to_operand_dims=tuple(d + 1 for d in dnums.scatter_dims_to_operand_dims))
  # b) perform the scatter on the (value, id) pairs and split them apart.
  outputs = scatter_p.bind(
      new_operand, scatter_indices, updates_and_ids, update_jaxpr=update_jaxpr,
      update_consts=update_consts, dimension_numbers=new_dnums)
  val_out = index_in_dim(outputs, 0, keepdims=False)
  scattered_ids = index_in_dim(outputs, 1, keepdims=False)
  # c) compute the inverse gather that "undoes" the scatter on the id values.
  gather_dnums = GatherDimensionNumbers(
      offset_dims=dnums.update_window_dims,
      collapsed_slice_dims=dnums.inserted_window_dims,
      start_index_map=dnums.scatter_dims_to_operand_dims)
  slice_sizes = []
  pos = 0
  for i in range(len(scattered_ids.shape)):
    if i in dnums.inserted_window_dims:
      slice_sizes.append(1)
    else:
      slice_sizes.append(updates_shape[dnums.update_window_dims[pos]])
      pos += 1
  gathered_update_ids = gather(scattered_ids, scatter_indices,
                               dimension_numbers=gather_dnums,
                               slice_sizes=slice_sizes)
  # ... and mask off input JVP elements that do not correspond to a primal
  # output: operand tangents survive where no update won (id == 0), update
  # tangents survive where that particular update won.
  masked_g_operand = select(eq(scattered_ids, _zeros(scattered_ids)),
                            g_operand, _zeros(g_operand))
  masked_g_updates = select(eq(update_ids, gathered_update_ids),
                            g_updates, _zeros(g_updates))
  # d) perform a scatter-add to compute the tangent output.
  tangent_out = scatter_add(masked_g_operand, scatter_indices, masked_g_updates,
                            dimension_numbers=dnums)
  return val_out, tangent_out
# The general scatter primitive with an opaque update computation.
scatter_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter',
    _scatter_translation_rule)
# Note: only a JVP is registered; _scatter_jvp is written so that the
# scatter-add transpose rule suffices for reverse mode.
ad.primitive_jvps[scatter_p] = _scatter_jvp
batching.primitive_batchers[scatter_p] = (
    partial(_scatter_batching_rule, scatter))
def _reduce_shape_rule(operand, init_value, *, computation, jaxpr, consts,
dimensions):
return tuple(onp.delete(operand.shape, dimensions))
def _reduce_translation_rule(c, operand, init_value, *, computation, jaxpr,
                             consts, dimensions):
  """Lowers the generic reduce to an XLA Reduce op.

  The scalar reducer subcomputation is built from the traced jaxpr.
  """
  reducer = _reduction_computation(c, jaxpr, consts, init_value)
  return xops.Reduce(c, [operand], [init_value], reducer, dimensions)
def _reduce_batch_rule(batched_args, batch_dims, *, computation, jaxpr, consts,
                       dimensions):
  """Batching rule for the generic reduce.

  Only a batched operand (with an unbatched init value) is supported: the
  reduced axes are renumbered to account for the inserted batch axis, and the
  batch axis itself is never reduced.
  """
  operand, init_value = batched_args
  operand_bdim, init_value_bdim = batch_dims
  if init_value_bdim is not None:
    raise NotImplementedError  # loop and stack
  assert operand_bdim is not None
  # Axes at or after the batch axis shift right by one.
  shifted_dims = [d + 1 if d >= operand_bdim else d for d in dimensions]
  # The batch axis moves left by the number of reduced axes in front of it.
  num_reduced_before = int(onp.sum(onp.less(dimensions, operand_bdim)))
  return (reduce(operand, init_value, computation, shifted_dims),
          operand_bdim - num_reduced_before)
def _reduction_computation(c, jaxpr, consts, init_value):
  """Builds an XLA scalar reducer subcomputation from a jaxpr.

  Both parameters of the binary reducer share the shape/dtype of `init_value`.
  """
  shape = c.get_shape(init_value)
  axis_env = xla.AxisEnv(1)  # no parallel primitives inside reductions
  subc = xla_bridge.make_computation_builder("reduction_computation")
  assert len(consts) == 0, "Reduction computations cannot have constants"
  args = [xb.parameter(subc, 0, shape), xb.parameter(subc, 1, shape)]
  out, = xla.jaxpr_subcomp(subc, jaxpr, None, axis_env, consts, '', *args)
  return subc.build(out)
def _masking_defreducer(prim, identity):
  # Registers a masking rule for reducer `prim`; `identity(shape, dtype)`
  # builds the array of identity elements used to fill padded entries.
  masking.masking_rules[prim] = partial(_reducer_masking_rule, prim, identity)
def _reducer_masking_rule(prim, identity, padded_vals, logical_shapes,
                          axes):
  """Masking rule shared by the reducers.

  Replaces the padding region of the (single) operand with the reduction's
  identity element before reducing over `axes`, so padded entries cannot
  contaminate the result.

  Fix: with `axes=()` there is nothing to mask, and the original code crashed
  because `_reduce(operator.and_, [])` raises TypeError (reduce of an empty
  sequence with no initializer); handle that case by binding directly.
  """
  (padded_val,), (logical_shape,) = padded_vals, logical_shapes
  if not axes:  # reducing over no axes: no padding can leak into the output
    return prim.bind(padded_val, axes=axes)
  padded_shape = masking.padded_shape_as_value(padded_val.shape)
  # For every reduced axis, keep only positions below the logical extent.
  masks = [broadcasted_iota(onp.int32, padded_shape, i) < d
           for i, d in enumerate(logical_shape) if i in axes]
  mask = _reduce(operator.and_, masks)
  masked_val = select(mask, padded_val, identity(padded_shape, padded_val.dtype))
  return prim.bind(masked_val, axes=axes)
# The generic reduce primitive. Note: no JVP/transpose rules are registered
# for it here; only batching.
reduce_p = standard_primitive(_reduce_shape_rule, _input_dtype, 'reduce',
                              _reduce_translation_rule)
batching.primitive_batchers[reduce_p] = _reduce_batch_rule
def _reduce_number_dtype_rule(name, operand, *args, **kw):
  """Dtype rule for arithmetic reducers: requires a numeric operand dtype and
  returns its canonicalized form. `name` is used only for the error message."""
  operand_dtype = operand.dtype
  if dtypes.issubdtype(operand_dtype, onp.number):
    return dtypes.canonicalize_dtype(operand_dtype)
  raise TypeError("{} does not accept dtype {}. Accepted dtypes are subtypes "
                  "of number.".format(name, onp.dtype(operand_dtype).name))
def _reduce_sum_shape_rule(operand, *, axes):
  # Same as every single-operand reduction: the reduced axes disappear.
  return _reduce_op_shape_rule(operand, axes=axes)
def _reduce_sum_translation_rule(c, operand, *, axes):
  """Lowers reduce_sum to XLA Reduce with a scalar add reducer and init 0."""
  dtype = c.get_shape(operand).numpy_dtype()
  scalar = ShapedArray((), dtype)
  return xops.Reduce(c, [operand], [xb.constant(c, onp.array(0, dtype))],
                     xla.primitive_subcomputation(add_p, scalar, scalar),
                     axes)
def _reduce_sum_transpose_rule(cotangent, operand, *, axes):
  """Transpose of reduce-sum: broadcast the cotangent back over the summed
  axes to recover the input shape."""
  assert ad.is_undefined_primal(operand)
  in_shape = operand.aval.shape
  # Output dims of the reduction map to the input dims that were kept.
  kept_dims = tuple(onp.delete(onp.arange(len(in_shape)), axes))
  out = broadcast_in_dim(cotangent, in_shape, kept_dims)
  assert out.shape == in_shape
  return [out]
reduce_sum_p = standard_primitive(
    _reduce_sum_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_sum'),
    'reduce_sum', _reduce_sum_translation_rule)
# reduce_sum is linear, so a transpose rule defines its differentiation.
ad.deflinear2(reduce_sum_p, _reduce_sum_transpose_rule)
batching.defreducer(reduce_sum_p)
# Under masking, padded entries are replaced by zeros (the additive identity).
_masking_defreducer(reduce_sum_p,
                    lambda shape, dtype: onp.broadcast_to(onp.array(0, dtype), shape))
def _reduce_op_shape_rule(operand, *, axes):
return tuple(onp.delete(operand.shape, axes))
def _reduce_prod_translation_rule(c, operand, *, axes):
  """Lowers reduce_prod to XLA Reduce with a scalar multiply reducer, init 1."""
  dtype = c.get_shape(operand).numpy_dtype()
  scalar = ShapedArray((), dtype)
  return xops.Reduce(c, [operand], [xb.constant(c, onp.array(1, dtype))],
                     xla.primitive_subcomputation(mul_p, scalar, scalar), axes)
def _reduce_prod_jvp_rule(primals, tangents, *, axes):
  """JVP for reduce_prod.

  Differentiates the product by tracing JVP through an explicit pairwise
  multiplication tree, so no division by (possibly zero) elements is needed.
  """
  operand, = primals
  tangent, = tangents
  input_shape = onp.array(operand.shape)
  # Total number of elements being multiplied together.
  n = onp.prod(input_shape[list(axes)])
  non_axes = onp.delete(onp.arange(len(input_shape)), axes)
  # Move the reduced axes to the front, and flatten them to 1D.
  permutation = axes + tuple(non_axes)
  new_shape = (n,) + tuple(input_shape[non_axes])
  operand = reshape(operand, new_shape, permutation)
  tangent = reshape(tangent, new_shape, permutation)
  def _reduce_prod_tree(x, axis=0):
    """Reduce by repeatedly splitting the array and multiplying."""
    while x.shape[axis] > 1:
      n = x.shape[axis]
      n1 = (n + 1) // 2
      n2 = n - n1
      x1 = slice_in_dim(x, 0, n1)
      x2 = slice_in_dim(x, n1, None)
      if n2 != n1:
        # Odd length: pad the shorter half with the multiplicative identity.
        paddings = [(0, 0, 0)] * len(x.shape)
        paddings[axis] = (0, 1, 0)
        x2 = pad(x2, _const(x, 1), paddings)
      x = x1 * x2
    shape = list(x.shape)
    del shape[axis]
    return reshape(x, shape)
  return api.jvp(_reduce_prod_tree, (operand,), (tangent,))
reduce_prod_p = standard_primitive(
    _reduce_op_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_prod'),
    'reduce_prod', _reduce_prod_translation_rule)
# Custom JVP via a multiplication tree; see _reduce_prod_jvp_rule.
ad.primitive_jvps[reduce_prod_p] = _reduce_prod_jvp_rule
batching.defreducer(reduce_prod_p)
def _reduce_chooser_shape_rule(operand, *, axes):
return tuple(onp.delete(operand.shape, axes))
def _reduce_chooser_translation_rule(prim, identity, c, operand, *, axes):
  """Shared lowering for max/min reductions; `identity(dtype)` supplies the
  initial value matching `prim` (max_p or min_p)."""
  dtype = c.get_shape(operand).numpy_dtype()
  scalar = ShapedArray((), dtype)
  return xops.Reduce(c, [operand], [xb.constant(c, identity(dtype))],
                     xla.primitive_subcomputation(prim, scalar, scalar), axes)
def _reduce_chooser_jvp_rule(g, ans, operand, *, axes):
  """JVP for max/min reductions: split g evenly among tied extremal entries."""
  # TODO(mattjj): an alternative is to use variadic reduce to compute the chosen
  # locations in a single pass (rather than comparing equality) and use a
  # gather, and/or even push along the chosen elements of g (b/112040122)
  shape = [1 if i in axes else d for i, d in enumerate(operand.shape)]
  # 1.0 where operand attains the reduced result (ties included), else 0.0.
  location_indicators = convert_element_type(
      _eq_meet(operand, reshape(ans, shape)), g.dtype)
  counts = _reduce_sum(location_indicators, axes)
  return div(_reduce_sum(mul(g, location_indicators), axes), counts)
# reduce_max: chooser lowering specialized with max_p and its identity.
_reduce_max_translation_rule = partial(_reduce_chooser_translation_rule, max_p,
                                       _get_max_identity)
reduce_max_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
                                  'reduce_max', _reduce_max_translation_rule)
ad.defjvp2(reduce_max_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_max_p)
# reduce_min: mirror image of reduce_max.
_reduce_min_translation_rule = partial(
    _reduce_chooser_translation_rule, min_p, _get_min_identity)
reduce_min_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
                                  'reduce_min', _reduce_min_translation_rule)
ad.defjvp2(reduce_min_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_min_p)
def _reduce_logical_shape_rule(operand, *, axes):
if operand.dtype != onp.bool_:
msg = "logical reduction requires operand dtype bool, got {}."
raise TypeError(msg.format(operand.dtype))
return tuple(onp.delete(operand.shape, axes))
def _reduce_logical_translation_rule(prim, identity, c, operand, *, axes):
  """Shared lowering for reduce_and/reduce_or over boolean operands."""
  scalar = ShapedArray((), onp.bool_)
  return xops.Reduce(c, [operand], [xb.constant(c, identity(onp.bool_))],
                     xla.primitive_subcomputation(prim, scalar, scalar), axes)
# reduce_or/reduce_and reuse the max/min identity helpers for their initial
# values. Neither has autodiff rules (boolean outputs), only batching.
_reduce_or_translation_rule = partial(_reduce_logical_translation_rule,
                                      or_p, _get_max_identity)
reduce_or_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(onp.bool_),
                                 'reduce_or', _reduce_or_translation_rule)
batching.defreducer(reduce_or_p)
_reduce_and_translation_rule = partial(_reduce_logical_translation_rule,
                                       and_p, _get_min_identity)
reduce_and_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(onp.bool_),
                                  'reduce_and', _reduce_and_translation_rule)
batching.defreducer(reduce_and_p)
def _reduce_window_shape_rule(operand, init_value, *, jaxpr, consts,
                              window_dimensions, window_strides, padding):
  """Shape rule for the generic reduce_window.

  The operand and init value must agree on dtype; the output shape is the
  usual sliding-window shape.
  """
  if operand.dtype == init_value.dtype:
    return _common_reduce_window_shape_rule(operand, window_dimensions,
                                            window_strides, padding)
  msg = ("reduce_window got inconsistent dtypes for operand and init_value: "
         " got operand dtype {} and init_value dtype {}.")
  raise TypeError(msg.format(operand.dtype, init_value.dtype))
def _reduce_window_translation_rule(c, operand, init_value, *, jaxpr, consts,
                                    window_dimensions, window_strides, padding):
  """Lowers the generic reduce_window to XLA ReduceWindowWithGeneralPadding."""
  xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
  pads = xc.window_padding_type_to_pad_values(
      padding, c.get_shape(operand).dimensions(), window_dimensions,
      window_strides)
  # Empty tuples: no base or window dilation.
  return xops.ReduceWindowWithGeneralPadding(
      operand, init_value, xla_computation, window_dimensions,
      window_strides, (), (), pads)
def _generic_reduce_window_batch_rule(
    batched_args, batch_dims, *, jaxpr, consts, window_dimensions,
    window_strides, padding):
  """Batching rule for the generic reduce_window.

  Delegates to _reduce_window_batch_rule via a closure that rebinds the
  primitive with the (unbatched) init value, jaxpr and consts.
  """
  operand, init = batched_args
  bdim, init_bdim = batch_dims
  if init_bdim is not None:
    raise NotImplementedError("reduce_window batching is not implemented for "
                              "initial values")
  def reduce_window(x, window_dimensions, window_strides, padding):
    # Single-operand rebind used by the shared window batching rule.
    return reduce_window_p.bind(
        x, init, jaxpr=jaxpr, consts=consts, window_dimensions=window_dimensions,
        window_strides=window_strides, padding=padding)
  return _reduce_window_batch_rule(reduce_window, (operand,), (bdim,),
                                   window_dimensions, window_strides, padding)
# Generic reduce_window primitive; only a batching rule is registered here.
reduce_window_p = standard_primitive(
    _reduce_window_shape_rule, _input_dtype, 'reduce_window',
    _reduce_window_translation_rule)
batching.primitive_batchers[reduce_window_p] = _generic_reduce_window_batch_rule
def _reduce_window_sum_shape_rule(operand, *, window_dimensions, window_strides,
                                  padding):
  """Shape rule for reduce_window_sum; the operand must be numeric."""
  if dtypes.issubdtype(operand.dtype, onp.number):
    return _common_reduce_window_shape_rule(operand, window_dimensions,
                                            window_strides, padding)
  msg = "operand to reduce_window_sum must have a number dtype, got {}"
  raise TypeError(msg.format(onp.dtype(operand.dtype).name))
def _reduce_window_sum_translation_rule(c, operand, *, window_dimensions,
                                        window_strides, padding):
  """Lowers reduce_window_sum: ReduceWindow with scalar add and init 0."""
  dtype = c.get_shape(operand).numpy_dtype()
  scalar = ShapedArray((), dtype)
  pads = xc.window_padding_type_to_pad_values(
      padding, c.get_shape(operand).dimensions(), window_dimensions,
      window_strides)
  # Empty tuples: no base or window dilation.
  return xops.ReduceWindowWithGeneralPadding(
      operand, xb.constant(c, onp.array(0, dtype)),
      xla.primitive_subcomputation(add_p, scalar, scalar), window_dimensions,
      window_strides, (), (), pads)
def _reduce_window_sum_transpose_rule(cotangent, operand, *, window_dimensions,
                                      window_strides, padding):
  """Transpose of a windowed sum: another windowed sum over the cotangent.

  The cotangent is padded and interior-dilated exactly like the LHS gradient
  of a convolution whose kernel has the window's shape.
  """
  assert ad.is_undefined_primal(operand)
  input_shape = operand.aval.shape
  in_pads = padtype_to_pads(input_shape, window_dimensions, window_strides,
                            padding)
  ones = [1] * len(input_shape)
  pads = _conv_general_vjp_lhs_padding(
      input_shape, window_dimensions, window_strides, cotangent.shape, in_pads,
      ones, ones)
  # Interior padding of (stride - 1) spreads the cotangent back over the
  # positions each window covered in the forward pass.
  padding_config = [(lo, hi, stride - 1)
                    for (lo, hi), stride in zip(pads, window_strides)]
  pad_cotangent = pad(cotangent, _zero(cotangent), padding_config)
  result = _reduce_window_sum(pad_cotangent, window_dimensions, ones,
                              xla_client.PaddingType.VALID)
  assert result.shape == input_shape
  return [result]
def _reduce_window_batch_rule(reduce_window, batched_args, bdims, *,
window_dimensions, window_strides, padding):
operand, = batched_args
bdim, = bdims
if bdim is not None:
window_dimensions = \
window_dimensions[:bdim] + (1,) + window_dimensions[bdim:]
window_strides = window_strides[:bdim] + (1,) + window_strides[bdim:]
operand = reduce_window(
operand, window_dimensions, window_strides, padding)
return operand, bdim
reduce_window_sum_p = standard_primitive(
    _reduce_window_sum_shape_rule, _input_dtype, 'reduce_window_sum',
    _reduce_window_sum_translation_rule)
# Windowed sum is linear; its transpose rule covers differentiation.
ad.deflinear2(reduce_window_sum_p, _reduce_window_sum_transpose_rule)
batching.primitive_batchers[reduce_window_sum_p] = partial(
    _reduce_window_batch_rule, _reduce_window_sum)
def _reduce_window_chooser_translation_rule(
    prim, identity, c, operand, *, window_dimensions, window_strides, padding):
  """Shared lowering for reduce_window_max/min; `identity(dtype)` provides
  the initial value matching `prim`."""
  dtype = c.get_shape(operand).numpy_dtype()
  scalar = ShapedArray((), dtype)
  pads = xc.window_padding_type_to_pad_values(
      padding, c.get_shape(operand).dimensions(), window_dimensions,
      window_strides)
  # Empty tuples: no base or window dilation.
  return xops.ReduceWindowWithGeneralPadding(
      operand, xb.constant(c, identity(dtype)),
      xla.primitive_subcomputation(prim, scalar, scalar), window_dimensions,
      window_strides, (), (), pads)
def _reduce_window_chooser_jvp_rule(prim, g, operand, *, window_dimensions,
                                    window_strides, padding):
  """JVP for reduce_window_max/min: gather the tangent of the element each
  window selected, using >= for max and <= for min."""
  assert prim is max_p or prim is min_p
  if prim is max_p:
    select_prim = ge_p
  else:
    select_prim = le_p
  return _select_and_gather_add(g, operand, select_prim, window_dimensions,
                                window_strides, padding)
def _common_reduce_window_shape_rule(operand, window_dimensions,
                                     window_strides, padding):
  """Validates the window arguments against the operand and returns the
  sliding-window output shape."""
  _check_shapelike("reduce_window", "window_dimensions", window_dimensions)
  _check_shapelike("reduce_window", "window_strides", window_strides)
  if operand.ndim != len(window_dimensions):
    raise TypeError(
        ("reduce_window got the wrong number of window_dimensions for "
         "operand: got operand shape {} with window_dimensions {}.")
        .format(operand.shape, window_dimensions))
  if len(window_strides) != len(window_dimensions):
    raise TypeError(
        ("reduce_window got inconsistent window_strides and "
         "window_dimensions: got window_strides {} and window_dimensions {}.")
        .format(window_strides, window_dimensions))
  return reduce_window_shape_tuple(operand.shape, window_dimensions,
                                   window_strides, padding)
def reduce_window_shape_tuple(operand_shape, window_dimensions, window_strides,
                              padding):
  """Computes the output shape of a window reduction.

  Pads the operand shape per `padding`, then applies the standard window
  count floor((padded - window) / stride) + 1 in each dimension.
  """
  pads = padtype_to_pads(operand_shape, window_dimensions, window_strides, padding)
  total_pad = onp.add(*zip(*pads))  # lo + hi padding per dimension
  padded_shape = onp.add(operand_shape, total_pad)
  window_counts = onp.floor_divide(
      onp.subtract(padded_shape, window_dimensions), window_strides) + 1
  return tuple(window_counts)
# reduce_window_max: chooser lowering specialized with max_p and its identity.
_reduce_window_max_translation_rule = partial(
    _reduce_window_chooser_translation_rule, max_p, _get_max_identity)
reduce_window_max_p = standard_primitive(
    _common_reduce_window_shape_rule, _input_dtype, 'reduce_window_max',
    _reduce_window_max_translation_rule)
ad.defjvp(reduce_window_max_p, partial(_reduce_window_chooser_jvp_rule, max_p))
batching.primitive_batchers[reduce_window_max_p] = partial(
    _reduce_window_batch_rule, _reduce_window_max)
# reduce_window_min: mirror image of reduce_window_max.
_reduce_window_min_translation_rule = partial(
    _reduce_window_chooser_translation_rule, min_p, _get_min_identity)
reduce_window_min_p = standard_primitive(
    _common_reduce_window_shape_rule, _input_dtype, 'reduce_window_min',
    _reduce_window_min_translation_rule)
ad.defjvp(reduce_window_min_p, partial(_reduce_window_chooser_jvp_rule, min_p))
# Register the named batcher directly. (Previously an identical partial was
# built twice: once bound to _reduce_window_min_batch_rule, which was never
# used, and once inline in the registration.)
_reduce_window_min_batch_rule = partial(_reduce_window_batch_rule,
                                        _reduce_window_min)
batching.primitive_batchers[reduce_window_min_p] = _reduce_window_min_batch_rule
def _select_and_scatter_shape_rule(
    operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,
    scatter_consts, window_dimensions, window_strides, padding):
  """Shape rule for select_and_scatter: the output matches the operand."""
  _check_shapelike("select_and_scatter", "window_dimensions", window_dimensions)
  _check_shapelike("select_and_scatter", "window_strides", window_strides)
  if len(window_dimensions) == len(window_strides):
    return operand.shape
  msg = ("select_and_scatter got inconsistent window_strides and "
         "window_dimensions: got window_strides {} and window_dimensions {}.")
  raise TypeError(msg.format(window_strides, window_dimensions))
def _select_and_scatter_translation(
  c, operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,
  scatter_consts, window_dimensions, window_strides, padding):
  """Lowers select_and_scatter; both the select and the scatter computations
  are built from jaxprs via _reduction_computation."""
  select = _reduction_computation(c, select_jaxpr, select_consts, init_value)
  scatter = _reduction_computation(c, scatter_jaxpr, scatter_consts, init_value)
  pads = xc.window_padding_type_to_pad_values(
      padding, c.get_shape(operand).dimensions(), window_dimensions,
      window_strides)
  return xops.SelectAndScatterWithGeneralPadding(
      operand, select, window_dimensions, window_strides, pads, source,
      init_value, scatter)
# select_and_scatter primitive; no autodiff or batching rules registered here.
select_and_scatter_p = standard_primitive(
    _select_and_scatter_shape_rule, _input_dtype, 'select_and_scatter',
    _select_and_scatter_translation)
def _select_and_scatter_add_shape_rule(
    source, operand, *, select_prim, window_dimensions, window_strides,
    padding):
  # The scattered output always has the operand's shape.
  return operand.shape
def _select_and_scatter_add_translation(
    c, source, operand, *, select_prim, window_dimensions, window_strides,
    padding):
  """Lowers select_and_scatter_add: select with `select_prim` (ge/le) and
  scatter with addition into a zero-initialized output."""
  dtype = c.get_shape(operand).numpy_dtype()
  scalar = ShapedArray((), dtype)
  select = xla.primitive_subcomputation(select_prim, scalar, scalar)
  scatter = xla.primitive_subcomputation(add_p, scalar, scalar)
  zero = xb.constant(c, onp.array(0, dtype))
  pads = xc.window_padding_type_to_pad_values(
      padding, c.get_shape(operand).dimensions(), window_dimensions,
      window_strides)
  return xops.SelectAndScatterWithGeneralPadding(
      operand, select, window_dimensions, window_strides, pads, source, zero,
      scatter)
def _select_and_scatter_add_jvp(
    primals, tangents, *, select_prim, window_dimensions, window_strides,
    padding):
  """JVP for select_and_scatter_add.

  The primitive is linear in `source`; `operand` only drives the selection,
  so its tangent is discarded (the output is piecewise-constant in it).
  """
  source, operand = primals
  g_source, g_operand = tangents
  del g_operand  # output is piecewise-constant in `operand`
  primal_out = _select_and_scatter_add(
      source, operand, select_prim, window_dimensions, window_strides,
      padding)
  if g_source is ad_util.zero:
    return primal_out, ad_util.zero
  tangent_out = _select_and_scatter_add(
      g_source, operand, select_prim, window_dimensions, window_strides,
      padding)
  return primal_out, tangent_out
def _select_and_scatter_add_transpose(
    t, source, operand, *, select_prim, window_dimensions, window_strides,
    padding):
  """Transpose of select_and_scatter_add: gather-add of the cotangent with
  the same selection; only `source` may be an undefined primal."""
  assert ad.is_undefined_primal(source)
  assert not ad.is_undefined_primal(operand)
  source_cotangent = _select_and_gather_add(t, operand, select_prim,
                                            window_dimensions, window_strides,
                                            padding)
  return [source_cotangent, None]
def _select_and_scatter_add_batch_rule(batched_args, batch_dims, **kwargs):
  """Batching rule for select_and_scatter_add.

  Fixes: the original fell off the end (returning None) when neither input
  was batched, which the batching machinery cannot unpack; it also repeated
  the unroll/reshape/concatenate sequence in all three batched branches.
  """
  source, operand = batched_args
  s_bdims, o_bdims = batch_dims
  if s_bdims is None and o_bdims is None:
    # Neither input is batched: apply once and report an unbatched result.
    return _select_and_scatter_add(source, operand, **kwargs), None
  #TODO(#212): use a map construct instead of unrolling.
  if s_bdims is not None:
    source = batching.moveaxis(source, s_bdims, 0)
  if o_bdims is not None:
    operand = batching.moveaxis(operand, o_bdims, 0)
  # Pair each batch element of the batched argument(s); an unbatched argument
  # is broadcast against every element of the batched one.
  if s_bdims is not None and o_bdims is not None:
    pairs = zip(source, operand)
  elif s_bdims is not None:
    pairs = ((s, operand) for s in source)
  else:
    pairs = ((source, o) for o in operand)
  outputs = [_select_and_scatter_add(s, o, **kwargs) for s, o in pairs]
  outputs = [reshape(out, (1,) + out.shape) for out in outputs]
  return concatenate(outputs, 0), 0
# select_and_scatter_add: transpose, JVP, and batching registrations.
select_and_scatter_add_p = standard_primitive(
    _select_and_scatter_add_shape_rule, _input_dtype, 'select_and_scatter_add',
    _select_and_scatter_add_translation)
ad.primitive_transposes[select_and_scatter_add_p] = \
    _select_and_scatter_add_transpose
ad.primitive_jvps[select_and_scatter_add_p] = _select_and_scatter_add_jvp
batching.primitive_batchers[select_and_scatter_add_p] = \
    _select_and_scatter_add_batch_rule
def _select_and_gather_add_shape_rule(
    tangents, operand, *, select_prim, window_dimensions, window_strides,
    padding):
  """Shape rule for select_and_gather_add: tangents/operand shapes must
  match; output is the usual sliding-window shape."""
  if tangents.shape != operand.shape:
    raise TypeError(
        ("select_and_gather_add tangents and operand shapes must match, "
         "got {} and {}.").format(tangents.shape, operand.shape))
  return _common_reduce_window_shape_rule(operand, window_dimensions,
                                          window_strides, padding)
# Unsigned/signed integer dtypes keyed by bit width, used to bit-pack pairs
# of values in _select_and_gather_add_translation below.
_UINT_DTYPES = {
  16: onp.uint16,
  32: onp.uint32,
  64: onp.uint64,
}
_INT_DTYPES = {
  16: onp.int16,
  32: onp.int32,
  64: onp.int64,
}
def _select_and_gather_add_translation(
    c, tangents, operand, *, select_prim, window_dimensions, window_strides,
    padding, max_bits=64):
  """Lowers select_and_gather_add by bit-packing (operand, tangent) pairs.

  XLA's ReduceWindow cannot reduce tuples, so each pair is packed into one
  wider integer word (or, when `max_bits` is too small, one reduced-precision
  word), reduced by comparing the value half, and the tangent half of the
  winning element is unpacked and returned.
  """
  shape = c.get_shape(operand)
  dtype = shape.numpy_dtype()
  etype = shape.xla_element_type()
  nbits = dtypes.finfo(dtype).bits
  assert nbits <= max_bits
  double_word_reduction = nbits * 2 <= max_bits
  const = lambda c, dtype, x: xb.constant(c, onp.array(x, dtype=dtype),
                                          canonicalize_types=False)
  if double_word_reduction:
    # TODO(b/73062247): XLA doesn't yet implement ReduceWindow on tuples, so
    # we implement a pair-wise ReduceWindow by packing two k-bit values into
    # 2k-bit unsigned integer using bit tricks.
    word_dtype = _UINT_DTYPES[nbits]
    double_word_dtype = _UINT_DTYPES[nbits * 2]
    word_type = xla_client.dtype_to_etype(word_dtype)
    double_word_type = xla_client.dtype_to_etype(double_word_dtype)
    # Packs two values into a tuple.
    def pack(a, b):
      a = xops.BitcastConvertType(a, word_type)
      b = xops.BitcastConvertType(b, word_type)
      a = xops.ConvertElementType(a, double_word_type)
      b = xops.ConvertElementType(b, double_word_type)
      # `a` (the value) occupies the high bits, `b` (the tangent) the low.
      a = xops.ShiftLeft(a, const(c, double_word_dtype, nbits))
      return xops.Or(a, b)
    # Unpacks the first element of a tuple.
    def fst(c, t):
      st = xops.ShiftRightLogical(t, const(c, double_word_dtype, nbits))
      return xops.BitcastConvertType(xops.ConvertElementType(st, word_type), etype)
    # Unpacks the second element of a tuple.
    def snd(t):
      return xops.BitcastConvertType(xops.ConvertElementType(t, word_type), etype)
  else:
    # The double-word trick above only works if we have a sufficiently large
    # type. As an alternative, we can pack two half words into a single word,
    # at the cost of precision.
    # TODO(b/73062247): add support for tuple reductions and remove this case.
    warnings.warn("Using reduced precision for gradient of reduce-window "
                  "min/max operator to work around missing XLA support for "
                  "pair-reductions. This is likely from a second or "
                  "higher derivative of a max-pooling operation.")
    r_nbits = nbits // 2
    # Drop/round the bottom mantissa bits.
    nexp = dtypes.finfo(dtype).nexp
    nmant = r_nbits - nexp - 1
    double_word_dtype = word_dtype = _UINT_DTYPES[nbits]
    word_type = xla_client.dtype_to_etype(word_dtype)
    # Packs two values into a tuple.
    def pack(a, b):
      a = xops.ReducePrecision(a, exponent_bits=nexp, mantissa_bits=nmant)
      b = xops.ReducePrecision(b, exponent_bits=nexp, mantissa_bits=nmant)
      a = xops.BitcastConvertType(a, word_type)
      b = xops.BitcastConvertType(b, word_type)
      # `a` keeps the high half; `b` is shifted down into the low half.
      b = xops.ShiftRightLogical(b, const(c, word_dtype, r_nbits))
      return xops.Or(a, b)
    # Unpacks the first element of a tuple.
    def fst(c, t):
      st = xops.And(t, const(c, word_dtype, ((1 << r_nbits) - 1) << r_nbits))
      return xops.BitcastConvertType(st, etype)
    # Unpacks the second element of a tuple.
    def snd(t):
      return xops.BitcastConvertType(xops.ShiftLeft(t, const(c, word_dtype, r_nbits)),
                                     etype)
  def reducer():
    # Scalar reducer over packed words: compare the value halves and keep the
    # whole winning word.
    c = xla_bridge.make_computation_builder("select_and_gather_pair_reducer")
    x = xb.parameter(c, 0,
      xla_client.Shape.array_shape(onp.dtype(double_word_dtype), ()))
    y = xb.parameter(c, 1,
      xla_client.Shape.array_shape(onp.dtype(double_word_dtype), ()))
    assert select_prim is ge_p or select_prim is le_p
    which = xops.Ge if select_prim is ge_p else xops.Le
    xops.Select(which(fst(c, x), fst(c, y)), x, y)
    return c.build()
  assert select_prim is ge_p or select_prim is le_p, select_prim
  init = -onp.inf if select_prim is ge_p else onp.inf
  pads = xc.window_padding_type_to_pad_values(
      padding, c.get_shape(operand).dimensions(), window_dimensions,
      window_strides)
  out = xops.ReduceWindowWithGeneralPadding(
      pack(operand, tangents), pack(const(c, dtype, init), const(c, dtype, 0)),
      reducer(), window_dimensions, window_strides, (), (), pads)
  return snd(out)
def _select_and_gather_add_jvp(
    primals, tangents, *, select_prim, window_dimensions, window_strides,
    padding):
  """JVP for select_and_gather_add.

  Linear in `source`; `operand` only drives the selection, so its tangent is
  discarded (the output is piecewise-constant in it).
  """
  source, operand = primals
  g_source, g_operand = tangents
  del g_operand  # output is piecewise-constant in `operand`
  primal_out = _select_and_gather_add(
      source, operand, select_prim, window_dimensions, window_strides,
      padding)
  if g_source is ad_util.zero:
    return primal_out, ad_util.zero
  return primal_out, _select_and_gather_add(
      g_source, operand, select_prim, window_dimensions, window_strides,
      padding)
def _select_and_gather_add_transpose(
    t, tangents, operand, *, select_prim, window_dimensions, window_strides,
    padding):
  """Transpose of select_and_gather_add: scatter-add the cotangent back to
  the selected positions; only `tangents` may be an undefined primal."""
  assert ad.is_undefined_primal(tangents)
  assert not ad.is_undefined_primal(operand)
  tangents_cotangent = _select_and_scatter_add(t, operand, select_prim,
                                               window_dimensions,
                                               window_strides, padding)
  return [tangents_cotangent, None]
def _select_and_gather_add_batching_rule(
    batched_args, batch_dims, *, select_prim, window_dimensions, window_strides,
    padding):
  """Batching rule: move the batch axis to the front and window over it trivially."""
  tangents, operand = batched_args
  t_bdim, o_bdim = batch_dims
  size = next(a.shape[d] for a, d in zip(batched_args, batch_dims)
              if d is not None)
  tangents = batching.bdim_at_front(tangents, t_bdim, size)
  operand = batching.bdim_at_front(operand, o_bdim, size)
  # A window/stride of 1 over the new leading batch axis leaves it untouched.
  out = _select_and_gather_add(tangents, operand, select_prim,
                               (1,) + window_dimensions, (1,) + window_strides,
                               padding)
  return out, 0
# Primitive registration for select_and_gather_add and its autodiff/batching
# and backend-specific lowering rules.
select_and_gather_add_p = standard_primitive(
    _select_and_gather_add_shape_rule, _input_dtype, 'select_and_gather_add',
    _select_and_gather_add_translation)
ad.primitive_jvps[select_and_gather_add_p] = _select_and_gather_add_jvp
ad.primitive_transposes[select_and_gather_add_p] = \
    _select_and_gather_add_transpose
batching.primitive_batchers[select_and_gather_add_p] = \
    _select_and_gather_add_batching_rule
# TPU lowering packs value/tangent pairs into words of at most 32 bits.
xla.backend_specific_translations['tpu'][select_and_gather_add_p] = partial(
    _select_and_gather_add_translation,
    max_bits=32)
# Parallel prefix-scan. See:
# https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda
# and
# Blelloch, Guy E. 1990. "Prefix Sums and Their Applications.", Technical Report
# CMU-CS-90-190, School of Computer Science, Carnegie Mellon University.
#
# Unlike the Blelloch algorithm, we use an out-of-place algorithm that uses 2n
# space. This is somewhat wasteful if we are interested only in the output of
# the forward pass, but more memory-efficient if we intend to differentiate
# through the implementation of the scan.
def _prescan_power_of_two(x, axis: int, op: Callable, unit):
  """Out-of-place parallel prefix scan along `axis` (see module comment above).

  Requires the length of `axis` to be a power of two. `op` must be an
  associative binary operation with identity element `unit`. Returns a pair
  of (scan result array, total reduction of `x` along `axis`).
  """
  n = x.shape[axis]
  assert n != 0 and n & (n - 1) == 0, "n must be a power of 2"

  # Upsweep
  xs = []
  for d in range(0, n.bit_length() - 1):
    # Combine adjacent pairs; each pass halves the length of the axis.
    x1 = slice_in_dim(x, 0, None, stride=2, axis=axis)
    xs.append(x1)
    x2 = slice_in_dim(x, 1, None, stride=2, axis=axis)
    x = op(x1, x2)
  total = x

  # Downsweep
  x = full_like(total, unit)
  # Interior padding of 1 (third pad entry) interleaves partial results back
  # to double length on each pass.
  pad_left = [(0, 0, 0)] * len(x.shape)
  pad_left[axis] = (1, 0, 1)
  pad_right = [(0, 0, 0)] * len(x.shape)
  pad_right[axis] = (0, 1, 1)
  for w in reversed(xs):
    x1 = pad(x, _const(x, 0), pad_right)
    x2 = pad(x, _const(x, 0), pad_left)
    w = pad(w, _const(x, 0), pad_left)
    x = x1 + op(x2, w)

  return x, total
def _parallel_prefix_scan(x, axis: int, op: Callable, unit):
  """Prefix scan of `x` along `axis` with associative `op` and identity `unit`.

  Pads `axis` up to the next power of two with `unit` (which cannot change
  the result), runs the power-of-two scan, and assembles the final result
  from the shifted scan plus the total reduction.
  """
  n = x.shape[axis]
  if n == 0:
    return x
  # Pads to the next largest power of two
  nbits = n.bit_length()
  if n == (1 << (nbits - 1)):
    # n is already a power of two; avoid doubling the axis length.
    nbits -= 1
  padding = [(0, 0, 0)] * len(x.shape)
  padding[axis] = (0, (1 << nbits) - n, 0)
  x = pad(x, _const(x, unit), padding)
  x, total = _prescan_power_of_two(x, axis, op, unit)
  return concatenate((slice_in_dim(x, 1, n, axis=axis), total), dimension=axis)
# Prefix-scan specializations used as the default (non-TPU) lowerings below.
_cumsum_prefix_scan = partial(_parallel_prefix_scan, op=add, unit=0)
_cumprod_prefix_scan = partial(_parallel_prefix_scan, op=mul, unit=1)
def _cumred_shape_rule(x, *, axis: int):
  """Shape rule shared by cumulative reductions: output shape equals input shape."""
  if not 0 <= axis < x.ndim:
    raise ValueError(
        "axis {} is out of bounds for array of shape {}".format(axis, x.shape))
  return x.shape
def _cumsum_transpose_rule(t, *, axis: int):
  # cumsum is linear (registered via ad.deflinear below); its transpose is a
  # cumsum of the reversed cotangent along the same axis, reversed back.
  return [rev(cumsum(rev(t, (axis,)), axis=axis), (axis,))]
def _cumprod_jvp_rule(primals, tangents, *, axis: int):
  """JVP for cumprod, obtained by differentiating the prefix-scan formulation."""
  # Irrespective of backend, we always use the parallel prefix scan
  # implementation when differentiating because reduce_window is not
  # arbitrarily differentiable.
  return api.jvp(partial(_cumprod_prefix_scan, axis=axis), primals, tangents)
def _cumred_tpu_translation_rule(window_reduce: Callable, unit, x, *,
                                 axis: int):
  """Lower a cumulative reduction to a reduce_window over an identity-padded input."""
  # On TPU, an implementation using reduce_window is handled specially by the
  # compiler and is efficient. On other backends, it is O(n^2).
  n = x.shape[axis]
  if n == 0:
    return x
  # Pad n-1 identity elements in front so window i covers inputs [0, i].
  padding = [(0, 0, 0)] * x.ndim
  padding[axis] = (n - 1, 0, 0)
  x = pad(x, _const(x, unit), padding)
  strides = [1] * x.ndim
  window_dims = [1] * x.ndim
  window_dims[axis] = n
  return window_reduce(x, window_dims, strides, xla_client.PaddingType.VALID)
def _cumred_batch_rule(prim, batched_args, batch_dims, *, axis: int):
  """Batching rule shared by cumsum/cumprod: shift `axis` past the batch dim."""
  operand, = batched_args
  bdim, = batch_dims
  shifted_axis = axis + 1 if axis >= bdim else axis
  return prim.bind(operand, axis=shifted_axis), bdim
# cumsum: default lowering is the parallel prefix scan; TPU uses reduce_window
# (identity 0). cumsum is linear, so a transpose rule covers reverse-mode AD.
cumsum_p = standard_primitive(
    _cumred_shape_rule, partial(_reduce_number_dtype_rule, "cumsum"),
    'cumsum', xla.lower_fun(_cumsum_prefix_scan, multiple_results=False))
ad.deflinear(cumsum_p, _cumsum_transpose_rule)
xla.backend_specific_translations['tpu'][cumsum_p] = xla.lower_fun(
    partial(_cumred_tpu_translation_rule, _reduce_window_sum, 0),
    multiple_results=False)
batching.primitive_batchers[cumsum_p] = partial(_cumred_batch_rule, cumsum_p)

# cumprod: nonlinear, so it gets a JVP rule; TPU reduce_window uses identity 1.
cumprod_p = standard_primitive(
    _cumred_shape_rule, partial(_reduce_number_dtype_rule, "cumprod"),
    'cumprod', xla.lower_fun(_cumprod_prefix_scan, multiple_results=False))
ad.primitive_jvps[cumprod_p] = _cumprod_jvp_rule
xla.backend_specific_translations['tpu'][cumprod_p] = xla.lower_fun(
    partial(_cumred_tpu_translation_rule, _reduce_window_prod, 1),
    multiple_results=False)
batching.primitive_batchers[cumprod_p] = partial(_cumred_batch_rule, cumprod_p)
def _sort_abstract_eval(*args, **kwargs):
  """Abstract eval for sort: all operands must share a shape; avals pass through."""
  shaped = tuple(map(raise_to_shaped, args))
  if not all(a.shape == shaped[0].shape for a in shaped[1:]):
    shapes = " ".join(str(a.shape) for a in shaped)
    raise TypeError(f"Arguments to sort must have equal shapes, got: {shapes}")
  return shaped
def _float_to_int_for_sort(x):
  """Bitcast a float array to signed ints that sort in the same total order."""
  # Switch from a floating point value to a integer value in such a way that
  # when using the integer value to compare, we get the same result for normal
  # values, and -nan is treated as the smallest value, and nan is treated as
  # the largest value.
  # If f is a float, and
  # x = bit_cast<int32>(f);
  # y = x < 0 ? int32_max - x : x;
  # then y is ordered as an int32 such that finite values have the obvious
  # order, -0 is ordered before 0, and -NaN and NaN appear at the beginning
  # and end of the ordering.
  # Note that in order to avoid -x to overflow, we calculate
  # int32_max - x as unsigned, and then convert back to signed.
  if x.dtype == dtypes.bfloat16:
    # bfloat16 is compared via its float32 widening.
    x = convert_element_type(x, onp.float32)
  nbits = onp.finfo(x).bits
  signed_dtype = _INT_DTYPES[nbits]
  unsigned_dtype = _UINT_DTYPES[nbits]

  signed = bitcast_convert_type(x, signed_dtype)
  unsigned = bitcast_convert_type(x, unsigned_dtype)
  flipped = bitcast_convert_type(
      sub(unsigned_dtype(onp.iinfo(signed_dtype).max), unsigned), signed_dtype)
  return select(lt(signed, _zero(signed)), flipped, signed)
# Default comparator that sorts the operands only on their first arguments.
# For floating point types, a total order is created where
# -NaN < -infinity < ... < -0 < 0 < ... < infinity < NaN.
# For complex types, the (real, imag) pairs are sorted lexicographically
# (following NumPy's semantics).
# This code adds complex-number support to the algorithm from:
# https://github.com/tensorflow/tensorflow/blob/ba43780830f09da72081fe5061c436f1c6203a92/tensorflow/compiler/xla/client/lib/comparators.h#L33
def _sort_lt_comparator(*operands):
  """Less-than comparator used by sort; compares only the first operand pair.

  Floats are mapped to integers with an equivalent total order (see
  _float_to_int_for_sort); complex values compare lexicographically on
  (real, imag). Remaining operand pairs are carried along unsorted.
  """
  assert len(operands) >= 2 and len(operands) % 2 == 0, operands
  x, y = operands[:2]
  assert x.dtype == y.dtype, (x.dtype, y.dtype)
  if onp.issubdtype(x.dtype, onp.complexfloating):
    key_pairs = [(_float_to_int_for_sort(real(x)), _float_to_int_for_sort(real(y))),
                 (_float_to_int_for_sort(imag(x)), _float_to_int_for_sort(imag(y)))]
  elif onp.issubdtype(x.dtype, onp.floating):
    key_pairs = [(_float_to_int_for_sort(x), _float_to_int_for_sort(y))]
  else:
    key_pairs = [(x, y)]
  # Fold from the least-significant key to the most-significant one.
  result = None
  for xk, yk in reversed(key_pairs):
    if result is None:
      result = lt(xk, yk)
    else:
      result = bitwise_or(lt(xk, yk), bitwise_and(eq(xk, yk), result))
  return result
def _sort_translation_rule(c, *operands, dimension):
  """XLA lowering for sort: build the comparator subcomputation, emit stable Sort."""
  types = [c.get_shape(x).xla_element_type() for x in operands]
  subc = xla_bridge.make_computation_builder("sort_lt_comparator")
  # The comparator receives two scalar parameters per operand: (x_i, y_i).
  params = [xb.parameter(subc, 2 * i + j, xc.Shape.array_shape(typ, ()))
            for i, typ in enumerate(types) for j in range(2)]
  result = xla.lower_fun(_sort_lt_comparator,
                         multiple_results=False)(subc, *params)
  comparator = subc.build(result)
  out = xops.Sort(c, operands, dimension=dimension, is_stable=True,
                  comparator=comparator)
  # For a single operand XLA returns a bare array; normalize to a tuple.
  return out if len(operands) != 1 else xops.Tuple(c, [out])
def _sort_jvp(primals, tangents, *, dimension):
  """JVP for sort: co-sort an iota to recover the permutation, then apply it
  to the tangents."""
  shape = primals[0].shape
  iotas = []
  for dim, size in enumerate(shape):
    # Use int32 indices unless the dimension is too large to address.
    dtype = onp.int32 if size < onp.iinfo(onp.int32).max else onp.int64
    iotas.append(broadcasted_iota(dtype, shape, dim))
  # Append an iota operand so the sorted output carries the permutation.
  primals = sort_p.bind(*(primals + (iotas[dimension],)), dimension=dimension)
  idx = tuple(primals[-1] if i == dimension else iotas[i]
              for i in range(len(shape)))
  tangents_out = tuple(ad_util.zero if t is ad_util.zero else t[idx]
                       for t in tangents)
  return tuple(primals[:-1]), tangents_out
def _sort_batch_rule(batched_args, batch_dims, *, dimension):
  """Batching rule for sort: align every operand on a common batch dimension."""
  prototype_arg, new_bdim = next(
    (a, b) for a, b in zip(batched_args, batch_dims) if b is not None)
  new_args = []
  for arg, bdim in zip(batched_args, batch_dims):
    if bdim is None:
      # Broadcast unbatched operands up to the batched prototype's shape.
      dims = onp.delete(onp.arange(prototype_arg.ndim), new_bdim)
      new_args.append(broadcast_in_dim(arg, prototype_arg.shape, dims))
    else:
      new_args.append(batching.moveaxis(arg, bdim, new_bdim))
  # Shift the sort dimension if the batch dim was inserted at or before it.
  new_dimension = dimension + (new_bdim <= dimension)
  bdims = (new_bdim,) * len(new_args)
  return (sort_p.bind(*new_args, dimension=new_dimension), bdims)
sort_p = Primitive('sort')
sort_p.multiple_results = True  # variadic: one sorted output per operand
sort_p.def_impl(partial(xla.apply_primitive, sort_p))
sort_p.def_abstract_eval(_sort_abstract_eval)
xla.translations[sort_p] = _sort_translation_rule
ad.primitive_jvps[sort_p] = _sort_jvp
batching.primitive_batchers[sort_p] = _sort_batch_rule
def _top_k_abstract_eval(operand, *, k):
  """Abstract eval for top_k: the minor dimension shrinks to k.

  Returns a pair of avals: the top-k values (operand dtype) and their int32
  indices.
  """
  if k < 0:
    raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
  if len(operand.shape) == 0:
    raise TypeError("top_k operand must have >= 1 dimension, got {}"
                    .format(operand.shape))
  out_shape = list(operand.shape)
  if out_shape[-1] < k:
    msg = "k argument to top_k must be no larger than minor dimension; {} vs {}"
    raise ValueError(msg.format(k, out_shape))
  out_shape[-1] = k
  values_aval = ShapedArray(out_shape, operand.dtype)
  indices_aval = ShapedArray(out_shape, onp.dtype(onp.int32))
  return (values_aval, indices_aval)
def _top_k_jvp(primals, tangents, *, k):
  """JVP for top_k: gather the operand tangent at the winning indices."""
  operand, = primals
  tangent, = tangents
  primals_out = top_k(operand, k)
  if tangent is ad_util.zero:
    tangents_out = (ad_util.zero, ad_util.zero)
  else:
    _, k_idxs = primals_out
    idx_shape = k_idxs.shape
    rank = len(idx_shape)
    gather_index_shape = idx_shape + (1,)
    gather_indices = []
    # Full gather coordinates: iotas address the leading dims, and the top-k
    # indices address the minor dim.
    for i in range(rank-1):
      _iota = iota(k_idxs.dtype, idx_shape[i])
      _iota = tie_in(operand, _iota)
      _iota = broadcast_in_dim(_iota, gather_index_shape, (i,))
      gather_indices.append(_iota)
    gather_indices.append(reshape(k_idxs, gather_index_shape))
    gather_indices = concatenate(gather_indices, dimension=rank)
    slice_sizes = (1,) * rank
    dnums = GatherDimensionNumbers(
      offset_dims=(),
      collapsed_slice_dims=tuple(range(rank)),
      start_index_map=tuple(range(rank)))
    # The index output is integer-valued, so its tangent is a symbolic zero.
    tangents_out = (gather(tangent, gather_indices, dnums, slice_sizes),
                    ad_util.zero)
  return primals_out, tangents_out
def _top_k_batch_rule(batched_args, batch_dims, *, k):
  """Batching rule for top_k.

  top_k always operates on the minor dimension; if the batch dim happens to
  be minor, swap it with its neighbor, run top_k, and swap back.
  """
  operand, = batched_args
  bdim, = batch_dims
  if bdim != operand.ndim - 1:
    return top_k(operand, k=k), (bdim, bdim)
  perm = onp.arange(operand.ndim)
  perm[bdim - 1], perm[bdim] = perm[bdim], perm[bdim - 1]
  values, indices = top_k(transpose(operand, perm), k=k)
  return (transpose(values, perm), transpose(indices, perm)), (bdim, bdim)
top_k_p = Primitive('top_k')
top_k_p.multiple_results = True  # returns (values, indices)
top_k_p.def_impl(partial(xla.apply_primitive, top_k_p))
top_k_p.def_abstract_eval(_top_k_abstract_eval)
xla.translations[top_k_p] = partial(standard_translate, 'top_k')
ad.primitive_jvps[top_k_p] = _top_k_jvp
batching.primitive_batchers[top_k_p] = _top_k_batch_rule
def _tie_in_transpose_rule(t):
  # The first operand only creates a data dependence; no cotangent flows to it.
  return [ad_util.zero, t]

def _tie_in_batch_rule(batched_args, batch_dims):
  # tie_in evaluates to its second operand, so the batch dim follows it too.
  y = tie_in(*batched_args)
  _, bdim_y = batch_dims
  return y, bdim_y

# tie_in(x, y) behaves as y; x only ties the computation into a trace.
tie_in_p = Primitive('tie_in')
tie_in_p.def_impl(lambda x, y: y)
tie_in_p.def_abstract_eval(lambda x, y: raise_to_shaped(y))
xla.translations[tie_in_p] = lambda c, x, y: y
ad.deflinear(tie_in_p, _tie_in_transpose_rule)
batching.primitive_batchers[tie_in_p] = _tie_in_batch_rule
masking.masking_rules[tie_in_p] = lambda vals, logical_shapes: vals[1]
def _stop_gradient_jvp_rule(primals, tangents):
  # if we don't call stop_gradient here, we'd only peel off one autodiff tracer
  x, = primals
  return stop_gradient(x), ad_util.zero

def _stop_gradient_batch_rule(batched_args, batch_dims):
  # stop_gradient does not change the data, so the batch dim is unchanged.
  x, = batched_args
  dim, = batch_dims
  return stop_gradient(x), dim

# Lowering is the identity; only autodiff (and batching) treat it specially.
xla.translations[ad_util.stop_gradient_p] = lambda c, x: x
ad.primitive_jvps[ad_util.stop_gradient_p] = _stop_gradient_jvp_rule
batching.primitive_batchers[ad_util.stop_gradient_p] = _stop_gradient_batch_rule
def create_token(x):
  """Creates an XLA token value with no preconditions for sequencing effects.
  Experimental.
  Args:
    x: a dummy argument used to tie the CreateToken operator into a trace. The
      value of `x` is ignored.
  Returns:
    A token, usable with `infeed`, `outfeed`, and `after_all`.
  """
  # x is a dummy argument used to tie the operator into a trace.
  return create_token_p.bind(x)

create_token_p = Primitive("create_token")
create_token_p.def_impl(partial(xla.apply_primitive, create_token_p))
# The dummy operand is ignored; the abstract result is always a token.
create_token_p.def_abstract_eval(lambda _: abstract_token)
xla.translations[create_token_p] = lambda c, _: xops.CreateToken(c)
def after_all(*operands):
  """Merges one or more XLA token values. Experimental.
  Wraps the XLA AfterAll operator."""
  return after_all_p.bind(*operands)

def _after_all_abstract_eval(*operands):
  # Every operand must already be a token; the merged result is a token too.
  if any(x is not abstract_token for x in operands):
    raise TypeError("Arguments to after_all must be tokens")
  return abstract_token

def _after_all_translation_rule(c, *operands):
  return xops.AfterAll(c, operands)

after_all_p = Primitive("after_all")
after_all_p.def_impl(partial(xla.apply_primitive, after_all_p))
after_all_p.def_abstract_eval(_after_all_abstract_eval)
xla.translations[after_all_p] = _after_all_translation_rule
def infeed(token, shape=None):
  """Consumes an infeed value of `shape` from the host. Experimental.
  `token` is used to sequence infeed and outfeed effects.

  Returns a pair of (values matching the pytree structure of `shape`, token).
  """
  flat_shapes, treedef = pytree.flatten(shape)
  for shape in flat_shapes:
    if not isinstance(shape, ShapedArray):
      raise TypeError("shape argument to infeed must be a pytree of "
                      "ShapedArray values, got {}".format(shape))
  xs_and_token = infeed_p.bind(token, shapes=tuple(flat_shapes))
  # Reassemble the user's pytree structure; the last element is the new token.
  return (treedef.unflatten(xs_and_token[:-1]), xs_and_token[-1])

def _infeed_abstract_eval(token, *, shapes):
  if token is not abstract_token:
    raise TypeError("First argument to infeed must be a token")
  # One result per requested shape, plus the output token.
  return shapes + (abstract_token,)


def _infeed_translation_rule(c, token, *, shapes):
  shape = tuple(xla.aval_to_xla_shape(x).with_major_to_minor_layout_if_absent()
                for x in shapes)
  xs_and_token = xops.InfeedWithToken(token,
                                      xla_client.Shape.tuple_shape(shape))
  xs = xops.GetTupleElement(xs_and_token, 0)
  token = xops.GetTupleElement(xs_and_token, 1)
  # Flatten XLA's ((x0, ..., xn), token) into (x0, ..., xn, token).
  outs = [xops.GetTupleElement(xs, i) for i in range(len(shapes))] + [token]
  return xops.Tuple(c, outs)

infeed_p = Primitive("infeed")
infeed_p.multiple_results = True
infeed_p.def_impl(partial(xla.apply_primitive, infeed_p))
infeed_p.def_abstract_eval(_infeed_abstract_eval)
xla.translations[infeed_p] = _infeed_translation_rule
def outfeed(token, xs):
  """Outfeeds value `xs` to the host. Experimental.
  `token` is used to sequence infeed and outfeed effects.
  """
  flat_xs, _ = pytree.flatten(xs)
  return outfeed_p.bind(token, *flat_xs)

def _outfeed_abstract_eval(token, *xs):
  if token is not abstract_token:
    raise TypeError("First argument to outfeed must be a token")
  # The only result is the output token used for effect sequencing.
  return abstract_token


def _outfeed_translation_rule(c, token, *xs):
  t = xops.Tuple(c, xs)
  return xops.OutfeedWithToken(t, token, c.get_shape(t))

outfeed_p = Primitive("outfeed")
outfeed_p.def_impl(partial(xla.apply_primitive, outfeed_p))
outfeed_p.def_abstract_eval(_outfeed_abstract_eval)
xla.translations[outfeed_p] = _outfeed_translation_rule
def rng_uniform(a, b, shape):
  """Stateful PRNG generator. Experimental and its use is discouraged.
  Returns uniformly distributed random numbers in the range [a, b)
  You should use jax.random for most purposes; this function exists only for
  niche use cases with special performance requirements.
  This API may be removed at any time.
  """
  return rng_uniform_p.bind(a, b, shape=tuple(shape))

def _rng_uniform_abstract_eval(a, b, *, shape):
  # Bounds must be scalars of one dtype; the result is an array of `shape`.
  if a.dtype != b.dtype:
    raise ValueError(
        "Arguments to rng_uniform must have identical dtypes, got {} "
        "and {}.".format(a.dtype, b.dtype))
  if a.shape != () or b.shape != ():
    raise ValueError(
        "Arguments to rng_uniform must be scalars; got shapes {} and {}."
        .format(a.shape, b.shape))
  return ShapedArray(shape, a.dtype)

def _rng_uniform_translation_rule(c, a, b, *, shape):
  xla_shape = xc.Shape.array_shape(c.get_shape(a).xla_element_type(), shape)
  return xops.RngUniform(a, b, xla_shape)

rng_uniform_p = Primitive("rng_uniform")
rng_uniform_p.def_impl(partial(xla.apply_primitive, rng_uniform_p))
rng_uniform_p.def_abstract_eval(_rng_uniform_abstract_eval)
xla.translations[rng_uniform_p] = _rng_uniform_translation_rule
### util

_ndim = onp.ndim  # rank (number of dimensions) of an array-like
def _dilate_shape(shape, dilation):
  """Utility function for computing the shape resulting from a dilation.

  A dimension of size d dilated by factor f has size f*(d-1)+1; zero-sized
  dimensions stay zero. `dilation` may be shorter than `shape`, in which
  case it right-aligns and leading dimensions are implicitly undilated.

  Returns an integer ndarray of the dilated sizes.

  Raises:
    TypeError: if any dilation factor is not positive.
  """
  if not onp.all(onp.greater(dilation, 0)):
    msg = "All dilations must be positive, got {}."
    raise TypeError(msg.format(dilation))
  dilation = (1,) * (len(shape) - len(dilation)) + tuple(dilation)
  # BUG FIX: convert `shape` to an array so the zero-size test below is
  # elementwise. With a plain tuple, `shape == 0` evaluated to a scalar
  # False, so zero-sized dimensions produced negative dilated sizes.
  shape = onp.asarray(shape)
  return onp.where(shape == 0, 0,
                   onp.multiply(dilation, onp.subtract(shape, 1)) + 1)
def _ceil_divide(x1, x2):
  """Ceiling division via negated floor division; works elementwise on arrays."""
  return onp.negative(onp.floor_divide(onp.negative(x1), x2))
def padtype_to_pads(in_shape, window_shape, window_strides, padding):
  """Convert a 'SAME'/'VALID' padding spec into explicit (low, high) pad pairs.

  `padding` may be a string (case-insensitive) or an xla_client.PaddingType.
  """
  PaddingType = xla_client.PaddingType

  if isinstance(padding, str):
    mapping = {'VALID': PaddingType.VALID, 'SAME': PaddingType.SAME}
    try:
      padding = mapping[padding.upper()]
    except KeyError as err:
      msg = "Unrecognized padding type: expected 'VALID' or 'SAME', got {}."
      raise RuntimeError(msg.format(padding)) from err

  if padding == PaddingType.VALID:
    return [(0, 0)] * len(in_shape)
  elif padding == PaddingType.SAME:
    # 'SAME' keeps ceil(in / stride) outputs; split any needed pad evenly,
    # giving the extra element (if odd) to the high side.
    out_shape = _ceil_divide(in_shape, window_strides)
    pad_sizes = onp.maximum(0, (out_shape - 1) * window_strides +
                               window_shape - in_shape)
    return [(size // 2, size - size // 2) for size in pad_sizes]
  else:
    msg = "Unknown padding type: {}."
    raise TypeError(msg.format(padding))
def _check_same_dtypes(name, ignore_fp_precision, *ttypes):
  """Raise TypeError unless all dtypes agree (optionally up to float precision).

  The `ignore_fp_precision` escape hatch exists because XLA's shape
  inference allows mixed floating point precision, but the HLO verifier
  often rejects it.
  """
  types = [onp.dtype(t) for t in ttypes]  # canonicalize
  if ignore_fp_precision:
    def _coarsen(dtype):
      if dtypes.issubdtype(dtype, onp.floating):
        return onp.floating
      if dtypes.issubdtype(dtype, onp.complexfloating):
        return onp.complexfloating
      return dtype
    types = [_coarsen(t) for t in types]
  distinct = {dtypes.canonicalize_dtype(t) for t in types}
  if len(distinct) != 1:
    if ignore_fp_precision:
      msg = ("{} requires arguments to have same dtypes up to floating point "
             "precision, got {}.")
    else:
      msg = "{} requires arguments to have the same dtypes, got {}."
    raise TypeError(msg.format(name, ", ".join(map(str, types))))
def _check_conv_shapes(name, lhs_shape, rhs_shape, window_strides):
  """Check that conv shapes are valid and are consistent with window_strides."""
  if len(lhs_shape) != len(rhs_shape):
    msg = "Arguments to {} must have same rank, got {} and {}."
    raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
  if len(lhs_shape) < 2:
    msg = "Arguments to {} must have rank at least 2, got {} and {}."
    raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
  # Feature dimensions (index 1 in canonical order) must agree.
  if lhs_shape[1] != rhs_shape[1]:
    msg = "Arguments to {} must agree on input feature size, got {} and {}."
    raise TypeError(msg.format(name, lhs_shape[1], rhs_shape[1]))
  _check_shapelike(name, "window_strides", window_strides)
  if not onp.all(onp.greater(window_strides, 0)):
    msg = "All elements of window_strides must be positive, got {}."
    raise TypeError(msg.format(window_strides))
  # One stride per spatial dimension (all dims except batch and feature).
  if len(window_strides) != len(lhs_shape) - 2:
    msg = "{} window_strides has wrong length: expected {}, got {}."
    expected_length = len(lhs_shape) - 2
    raise TypeError(msg.format(name, expected_length, len(window_strides)))
def conv_shape_tuple(lhs_shape, rhs_shape, strides, pads, batch_group_count=1):
  """Output shape of a convolution, with shapes in canonical (N, C, ...) order.

  `pads` may be explicit (low, high) pairs or a 'SAME'/'VALID' string. Each
  spatial output size is floor((in + pad_total - kernel) / stride) + 1,
  clamped at zero.
  """
  n_spatial = len(lhs_shape) - 2
  if isinstance(pads, str):
    pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)
  if len(pads) != n_spatial:
    msg = "Wrong number of explicit pads for convolution: expected {}, got {}."
    raise TypeError(msg.format(n_spatial, len(pads)))

  pad_totals = onp.sum(onp.array(pads).reshape(-1, 2), axis=1)
  lhs_padded = onp.add(lhs_shape[2:], pad_totals)
  out_space = onp.subtract(lhs_padded, rhs_shape[2:]) // onp.asarray(strides) + 1
  out_space = onp.maximum(0, out_space)
  assert lhs_shape[0] % batch_group_count == 0
  batch = lhs_shape[0] // batch_group_count
  return tuple((batch, rhs_shape[0]) + tuple(out_space))
def conv_general_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
                             dimension_numbers):
  """Output shape of a conv with arbitrary `dimension_numbers` layouts."""
  lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
  # Canonicalize operand layouts, compute the canonical output shape, then
  # permute the result back into the caller's output layout.
  out_canonical = conv_shape_tuple(onp.take(lhs_shape, lhs_perm),
                                   onp.take(rhs_shape, rhs_perm),
                                   window_strides, padding)
  return tuple(onp.take(out_canonical, onp.argsort(out_perm)))
def conv_transpose_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
                               dimension_numbers):
  """Compute the output shape of a conv_transpose for the given layouts."""
  lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
  lhs_trans = onp.take(lhs_shape, lhs_perm)
  rhs_trans = onp.take(rhs_shape, rhs_perm)
  if isinstance(padding, str):
    padding = [_conv_transpose_padding(k, s, padding)
               for k,s in zip(rhs_trans[2:], window_strides)]
  padding = list(map(onp.sum, padding))
  # Spatial size before padding: stride-expanded input minus kernel overlap.
  unpad_out_space = [(i-1) * s - k + 2
                     for i, k, s in zip(lhs_trans[2:],
                                        rhs_trans[2:],
                                        window_strides)]
  out_space = onp.sum([unpad_out_space, padding], axis=0).tolist()
  out_trans = tuple((lhs_trans[0], rhs_trans[0]) + tuple(out_space))
  # Undo the canonicalizing permutation to match the caller's output layout.
  return tuple(onp.take(out_trans, onp.argsort(out_perm)))
def _check_shapelike(fun_name, arg_name, obj):
  """Check that `obj` is a shape-like value (e.g. tuple of nonnegative ints).

  Raises:
    TypeError: with a message naming `fun_name`/`arg_name`, if `obj` is not
      a rank-1 tuple/list/ndarray of nonnegative integers.
  """
  if not isinstance(obj, (tuple, list, onp.ndarray)):
    msg = "{} {} must be of type tuple/list/ndarray, got {}."
    raise TypeError(msg.format(fun_name, arg_name, type(obj)))
  # bool(obj) for an ndarray raises an error, so we check len
  if not len(obj):  # pylint: disable=g-explicit-length-test
    return
  obj_arr = onp.array(obj)
  if obj_arr.ndim != 1:
    msg = "{} {} must be rank 1, got {}."
    # BUG FIX: previously only obj_arr.ndim was passed to format(), which
    # raised IndexError instead of the intended TypeError message.
    raise TypeError(msg.format(fun_name, arg_name, obj_arr.ndim))
  try:
    canonicalize_shape(obj_arr)
  except TypeError as err:
    msg = "{} {} must have every element be an integer type, got {}."
    # Chain the original error for easier debugging.
    raise TypeError(msg.format(fun_name, arg_name, tuple(map(type, obj)))) from err
  if not (obj_arr >= 0).all():
    msg = "{} {} must have every element be nonnegative, got {}."
    raise TypeError(msg.format(fun_name, arg_name, obj))
def _dynamic_slice_indices(operand, start_indices):
  """Normalize dynamic_slice start indices to per-dimension scalars,
  wrapping negative indices by the corresponding operand dimension."""
  if not isinstance(start_indices, (tuple, list)):
    if start_indices.ndim != 1:
      raise ValueError("Slice indices must be a 1D sequence, got {}"
                       .format(start_indices.shape))
    # Split a packed 1D index array into one scalar per operand dimension.
    start_indices = [reshape(slice(start_indices, [i], [i+1]), ())
                     for i in range(operand.ndim)]
  else:
    start_indices = [onp.asarray(i, dtype=dtypes.int_) if isinstance(i, int)
                     else i for i in start_indices]
  if len(start_indices) != operand.ndim:
    msg = ("Length of slice indices must match number of operand dimensions ({} "
          "vs {})")
    raise ValueError(msg.format(len(start_indices), operand.shape))
  # map int over operand.shape to raise any dynamic-shape errors
  return [select(lt(i, _const(i, 0)), add(i, _const(i, int(d))), i)
          for i, d in zip(start_indices, operand.shape)]
def _const(example, val):
  """Build a constant equal to `val` with the same scalar/array type as `example`."""
  if dtypes.is_python_scalar(example):
    scalar_ctor = dtypes.scalar_type_of(example)
    return scalar_ctor(val)
  return onp.array(val, _dtype(example))
# Convenience constructors for constant-filled arrays (plural names) and
# scalars (singular names) matching an example value's dtype.
_zeros: Callable = partial(full_like, fill_value=0)
_zero: Callable = partial(full_like, shape=(), fill_value=0)
_ones: Callable = partial(full_like, fill_value=1)
_one: Callable = partial(full_like, shape=(), fill_value=1)
_twos: Callable = partial(full_like, fill_value=2)
_two: Callable = partial(full_like, shape=(), fill_value=2)

# Public and private aliases for the dtype result-type rule.
dtype: Callable = dtypes.result_type
_dtype: Callable = dtypes.result_type
def _iscomplex(x) -> bool:
  """True if `x`'s result dtype is a complex floating type."""
  return dtypes.issubdtype(_dtype(x), onp.complexfloating)
def ranges_like(*xs):
  """Yield consecutive index ranges, one per argument, sized by len(arg)."""
  start = 0
  for x in xs:
    stop = start + len(x)
    yield range(start, stop)
    start = stop
def remaining(original, *removed_lists):
  """Return the elements of `original` not present in any of `removed_lists`."""
  removed = set(itertools.chain.from_iterable(removed_lists))
  return [x for x in original if x not in removed]
def _canonicalize_precision(precision):
  """Validate a precision argument: None and lax.Precision values pass through."""
  if precision is None or isinstance(precision, Precision):
    return precision
  msg = "Precision argument must be None or a lax.Precision value; got {}"
  raise ValueError(msg.format(precision))
def conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers):
  """Converts convolution `dimension_numbers` to a `ConvDimensionNumbers`.
  Args:
    lhs_shape: tuple of nonnegative integers, shape of the convolution input.
    rhs_shape: tuple of nonnegative integers, shape of the convolution kernel.
    dimension_numbers: None or a tuple/list of strings or a ConvDimensionNumbers
      object following the convolution dimension number specification format in
      xla_client.py.
  Returns:
    A `ConvDimensionNumbers` object that represents `dimension_numbers` in the
    canonical form used by lax functions.
  """
  if isinstance(dimension_numbers, ConvDimensionNumbers):
    return dimension_numbers  # already canonical
  if len(lhs_shape) != len(rhs_shape):
    msg = "convolution requires lhs and rhs ndim to be equal, got {} and {}."
    raise TypeError(msg.format(len(lhs_shape), len(rhs_shape)))

  if dimension_numbers is None:
    # None means: use the operand axis order as-is (identity permutations).
    iota = tuple(range(len(lhs_shape)))
    return ConvDimensionNumbers(iota, iota, iota)
  elif isinstance(dimension_numbers, (list, tuple)):
    if len(dimension_numbers) != 3:
      msg = "convolution dimension_numbers list/tuple must be length 3, got {}."
      raise TypeError(msg.format(len(dimension_numbers)))
    if not all(isinstance(elt, str) for elt in dimension_numbers):
      msg = "convolution dimension_numbers elements must be strings, got {}."
      raise TypeError(msg.format(tuple(map(type, dimension_numbers))))
    msg = ("convolution dimension_numbers[{}] must have len equal to the ndim "
           "of lhs and rhs, got {} for lhs and rhs shapes {} and {}.")
    for i, elt in enumerate(dimension_numbers):
      if len(elt) != len(lhs_shape):
        raise TypeError(msg.format(i, len(elt), lhs_shape, rhs_shape))

    lhs_spec, rhs_spec, out_spec = conv_general_permutations(dimension_numbers)
    return ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)
  else:
    msg = "convolution dimension_numbers must be tuple/list or None, got {}."
    raise TypeError(msg.format(type(dimension_numbers)))
def conv_general_permutations(dimension_numbers):
  """Utility for convolution dimension permutations relative to Conv HLO.

  Given (lhs_spec, rhs_spec, out_spec) strings such as ("NHWC", "HWIO",
  "NHWC"), returns the permutations that take each operand to the canonical
  batch/feature-major layout, with spatial dims of lhs and out ordered to
  match the kernel spec.
  """
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  charpairs = (("N", "C"), ("O", "I"), ("N", "C"))
  for i, (a, b) in enumerate(charpairs):
    spec = dimension_numbers[i]
    if spec.count(a) != 1 or spec.count(b) != 1:
      msg = ("convolution dimension_numbers[{}] must contain the characters "
             "'{}' and '{}' exactly once, got {}.")
      raise TypeError(msg.format(i, a, b, spec))
    if len(set(spec)) != len(spec):
      msg = ("convolution dimension_numbers[{}] cannot have duplicate "
             "characters, got {}.")
      raise TypeError(msg.format(i, spec))
  lhs_char, rhs_char, out_char = charpairs
  if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==
          set(out_spec) - set(out_char)):
    msg = ("convolution dimension_numbers elements must each have the same "
           "set of spatial characters, got {}.")
    raise TypeError(msg.format(dimension_numbers))

  def _perm(spec, charpair):
    spatial = [i for i, c in enumerate(spec) if c not in charpair]
    if spec is not rhs_spec:
      # Order lhs/out spatial dims consistently with the kernel spec.
      spatial.sort(key=lambda i: rhs_spec.index(spec[i]))
    return (spec.index(charpair[0]), spec.index(charpair[1]), *spatial)

  return tuple(_perm(s, cp) for s, cp in zip(dimension_numbers, charpairs))
def _conv_general_proto(dimension_numbers):
  """Build the XLA ConvolutionDimensionNumbers proto from canonical dim numbers."""
  assert type(dimension_numbers) is ConvDimensionNumbers
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  proto = xla_client.ConvolutionDimensionNumbers()
  # Index 0 is batch (output feature for the kernel), index 1 is feature,
  # and the remaining indices are spatial dimensions.
  proto.input_batch_dimension = lhs_spec[0]
  proto.input_feature_dimension = lhs_spec[1]
  proto.output_batch_dimension = out_spec[0]
  proto.output_feature_dimension = out_spec[1]
  proto.kernel_output_feature_dimension = rhs_spec[0]
  proto.kernel_input_feature_dimension = rhs_spec[1]
  proto.input_spatial_dimensions.extend(lhs_spec[2:])
  proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
  proto.output_spatial_dimensions.extend(out_spec[2:])
  return proto
def _conv_general_vjp_lhs_padding(
    in_shape, window_dimensions, window_strides, out_shape, padding,
    lhs_dilation, rhs_dilation):
  """Padding for the transposed conv that computes the VJP w.r.t. the conv input."""
  lhs_dilated = _dilate_shape(in_shape, lhs_dilation)
  rhs_dilated = _dilate_shape(window_dimensions, rhs_dilation)
  out_dilated = _dilate_shape(out_shape, window_strides)
  lo_pads = onp.array([lo for lo, _ in padding])
  pad_before = rhs_dilated - lo_pads - 1
  pad_after = lhs_dilated + rhs_dilated - 1 - out_dilated - pad_before
  return zip(pad_before, pad_after)
def _conv_general_vjp_rhs_padding(
    in_shape, window_dimensions, window_strides, out_shape, padding,
    lhs_dilation, rhs_dilation):
  """Padding for the conv that computes the VJP w.r.t. the conv kernel."""
  lhs_dilated = _dilate_shape(in_shape, lhs_dilation)
  rhs_dilated = _dilate_shape(window_dimensions, rhs_dilation)
  out_dilated = _dilate_shape(out_shape, window_strides)
  total_pad = out_dilated + rhs_dilated - lhs_dilated - 1
  # Keep each dimension's low pad; the high pad absorbs the remainder.
  return [(lo, total - lo) for (lo, _), total in zip(padding, total_pad)]
def _balanced_eq(x, z, y):
  # Returns 1 where only x == z, 1/2 where both x == z and y == z, and 0
  # where x != z (numerator: x==z -> 1 else 0; denominator: y==z -> 2 else 1).
  return div(select(_eq_meet(x, z), _ones(z), _zeros(z)),
             select(_eq_meet(y, z), _twos(z), _ones(z)))
def _eq_meet(a, b):
  """Elementwise equality, demoting the wider-dtype operand to the narrower dtype."""
  a_dtype, b_dtype = _dtype(a), _dtype(b)
  if a_dtype != b_dtype:
    if dtypes.promote_types(a_dtype, b_dtype) == a_dtype:
      a = convert_element_type(a, b_dtype)
    else:
      b = convert_element_type(b, a_dtype)
  return eq(a, b)
def _abstractify(x):
  """Return the shaped abstract value (shape/dtype level) for a concrete value."""
  return raise_to_shaped(core.get_aval(x))
def _check_user_dtype_supported(dtype, fun_name=None):
  """Reject non-number/bool dtypes and warn when x64 truncation will occur."""
  onp_dtype = onp.dtype(dtype)
  if onp_dtype.kind not in "biufc" and onp_dtype.type != dtypes.bfloat16:
    raise TypeError(f"JAX only supports number and bool dtypes, got dtype {dtype}")
  if dtype is not None and onp_dtype != dtypes.canonicalize_dtype(dtype):
    truncated = dtypes.canonicalize_dtype(dtype).name
    where = "requested in {}".format(fun_name) if fun_name else ""
    warnings.warn(
        ("Explicitly requested dtype {} {} is not available, "
         "and will be truncated to dtype {}. To enable more dtypes, set the "
         "jax_enable_x64 configuration option or the JAX_ENABLE_X64 shell "
         "environment variable. "
         "See https://github.com/google/jax#current-gotchas for more.")
        .format(dtype, where, truncated))
def _canonicalize_axis(axis, num_dims):
  """Map a possibly-negative `axis` into the range [0, num_dims).

  Raises ValueError when `axis` falls outside (-num_dims, num_dims).
  """
  axis = int(axis)
  canonical = axis + num_dims if axis < 0 else axis
  if not 0 <= canonical < num_dims:
    raise ValueError(
        "axis {} is out of bounds for array of dimension {}".format(
            canonical, num_dims))
  return canonical
| 41.774175 | 141 | 0.711189 |
import builtins
import collections
import enum
import functools
import itertools
import operator
import string
from typing import (Any, Callable, List, NamedTuple, Optional, Sequence, Union,
Tuple, Type)
import warnings
import numpy as onp
from ..util import partial, prod
from .. import core
from .. import ad_util
from .. import api
from .. import linear_util as lu
from .. import dtypes
from .. import lazy
from .. import lib
from ..config import flags
from ..core import Primitive
from ..abstract_arrays import (UnshapedArray, ShapedArray, ConcreteArray,
AbstractToken, array_types, make_shaped_array,
raise_to_shaped, abstract_token, canonicalize_shape)
from ..interpreters import partial_eval as pe
from ..interpreters import xla
from ..interpreters import pxla
from ..interpreters import ad
from ..interpreters import batching
from ..interpreters import masking
from ..util import curry, cache, safe_zip, unzip2, prod
from ..tree_util import build_tree, tree_unflatten, tree_map
from ..lib import pytree
from ..lib import xla_bridge
from ..lib import xla_client
xb = xla_bridge
xc = xla_client
xops = xla_client.ops
FLAGS = flags.FLAGS
_max = builtins.max
_min = builtins.max
_reduce = functools.reduce
Array = Any
DType = Any
Shape = Sequence[int]
@cache()
def broadcast_shapes(*shapes):
  """Return the shape obtained by NumPy-style broadcasting of ``shapes``.

  Raises ``ValueError`` when the shapes are not mutually broadcastable.
  Cached, since the same shape tuples recur heavily during tracing.
  """
  if len(shapes) == 1:
    return shapes[0]
  # Left-pad every shape with 1s to a common rank, then broadcast columnwise.
  ndim = _max(len(shape) for shape in shapes)
  shapes = onp.array([(1,) * (ndim - len(shape)) + shape for shape in shapes])
  # A zero-sized dimension broadcasts to zero regardless of the other sizes.
  is_zero = onp.any(shapes == 0, axis=0)
  max_shape = onp.max(shapes, axis=0)
  result_shape = onp.where(is_zero, 0, max_shape)
  # Every input dim must equal the result dim or be 1.
  if not onp.all((shapes == result_shape) | (shapes == 1)):
    raise ValueError("Incompatible shapes for broadcasting: {}"
                     .format(tuple(map(tuple, shapes))))
  return canonicalize_shape(result_shape)
def _identity(x): return x  # trivial identity, used as a placeholder transform
def neg(x: Array) -> Array:
  """Elementwise negation: ``-x``.

  NOTE(review): the original ``def`` line was garbled to ``ray:`` (a syntax
  error); reconstructed from the uniform one-line primitive-wrapper pattern
  used by every neighbouring function (cf. ``sign`` immediately below).
  """
  return neg_p.bind(x)
# ----------------------------------------------------------------------------
# One-line wrappers around XLA HLO primitives.  Each function simply binds the
# corresponding `Primitive` object defined later in this module; shape/dtype
# rules, differentiation and batching rules all hang off those primitives.
# ----------------------------------------------------------------------------
def sign(x: Array) -> Array:
  return sign_p.bind(x)
def nextafter(x1: Array, x2: Array) -> Array:
  # Next representable value after x1 in the direction of x2; the operands
  # are mutually broadcast via _brcast before binding.
  return nextafter_p.bind(_brcast(x1, x2), _brcast(x2, x1))
def floor(x: Array) -> Array:
  return floor_p.bind(x)
def ceil(x: Array) -> Array:
  return ceil_p.bind(x)
def round(x: Array) -> Array:
  return round_p.bind(x)
def is_finite(x: Array) -> Array:
  return is_finite_p.bind(x)
def exp(x: Array) -> Array:
  return exp_p.bind(x)
def expm1(x: Array) -> Array:
  return expm1_p.bind(x)
def log(x: Array) -> Array:
  return log_p.bind(x)
def log1p(x: Array) -> Array:
  return log1p_p.bind(x)
def tanh(x: Array) -> Array:
  return tanh_p.bind(x)
def sin(x: Array) -> Array:
  return sin_p.bind(x)
def cos(x: Array) -> Array:
  return cos_p.bind(x)
def atan2(x: Array, y: Array) -> Array:
  return atan2_p.bind(x, y)
def betainc(a: Array, b: Array, x: Array) -> Array:
  # Regularized incomplete beta function I_x(a, b).
  return regularized_incomplete_beta_p.bind(a, b, x)
def lgamma(x: Array) -> Array:
  return lgamma_p.bind(x)
def digamma(x: Array) -> Array:
  return digamma_p.bind(x)
def igamma(a: Array, x: Array) -> Array:
  return igamma_p.bind(a, x)
def igammac(a: Array, x: Array) -> Array:
  return igammac_p.bind(a, x)
def igamma_grad_a(a: Array, x: Array) -> Array:
  return igamma_grad_a_p.bind(a, x)
def bessel_i0e(x: Array) -> Array:
  return bessel_i0e_p.bind(x)
def bessel_i1e(x: Array) -> Array:
  return bessel_i1e_p.bind(x)
def erf(x: Array) -> Array:
  return erf_p.bind(x)
def erfc(x: Array) -> Array:
  return erfc_p.bind(x)
def erf_inv(x: Array) -> Array:
  return erf_inv_p.bind(x)
def real(x: Array) -> Array:
  return real_p.bind(x)
def imag(x: Array) -> Array:
  return imag_p.bind(x)
def complex(x: Array, y: Array) -> Array:
  # Build complex values from mutually-broadcast real and imaginary parts.
  return complex_p.bind(_brcast(x, y), _brcast(y, x))
def conj(x: Array) -> Array:
  # The input dtype is recorded as a primitive parameter (used by its rules).
  return conj_p.bind(x, input_dtype=_dtype(x))
def abs(x: Array) -> Array:
  return abs_p.bind(x)
def pow(x: Array, y: Array) -> Array:
  return pow_p.bind(x, y)
def integer_pow(x: Array, y: int) -> Array:
  # Exponentiation by a *static* integer; 0 and 1 are special-cased so no
  # primitive is bound for trivial powers.
  if y == 0:
    return _ones(x)
  elif y == 1:
    return x
  else:
    return integer_pow_p.bind(x, y=y)
def sqrt(x: Array) -> Array:
  return sqrt_p.bind(x)
def rsqrt(x: Array) -> Array:
  return rsqrt_p.bind(x)
def bitwise_not(x: Array) -> Array:
  return not_p.bind(x)
def bitwise_and(x: Array, y: Array) -> Array:
  return and_p.bind(x, y)
def bitwise_or(x: Array, y: Array) -> Array:
  return or_p.bind(x, y)
def bitwise_xor(x: Array, y: Array) -> Array:
  return xor_p.bind(x, y)
def population_count(x: Array) -> Array:
  return population_count_p.bind(x)
def add(x: Array, y: Array) -> Array:
  return add_p.bind(x, y)
def sub(x: Array, y: Array) -> Array:
  return sub_p.bind(x, y)
def mul(x: Array, y: Array) -> Array:
  return mul_p.bind(x, y)
def div(x: Array, y: Array) -> Array:
  return div_p.bind(x, y)
def rem(x: Array, y: Array) -> Array:
  return rem_p.bind(x, y)
# Note: `max` and `min` here shadow the builtins within this module; the
# builtins remain available as `_max`/`_min` (aliased near the top of file).
def max(x: Array, y: Array) -> Array:
  return max_p.bind(x, y)
def min(x: Array, y: Array) -> Array:
  return min_p.bind(x, y)
def shift_left(x: Array, y: Array) -> Array:
  return shift_left_p.bind(x, y)
def shift_right_arithmetic(x: Array, y: Array) -> Array:
  return shift_right_arithmetic_p.bind(x, y)
def shift_right_logical(x: Array, y: Array) -> Array:
  return shift_right_logical_p.bind(x, y)
def eq(x: Array, y: Array) -> Array:
  return eq_p.bind(x, y)
def ne(x: Array, y: Array) -> Array:
  return ne_p.bind(x, y)
def ge(x: Array, y: Array) -> Array:
  return ge_p.bind(x, y)
def gt(x: Array, y: Array) -> Array:
  return gt_p.bind(x, y)
def le(x: Array, y: Array) -> Array:
  return le_p.bind(x, y)
def lt(x: Array, y: Array) -> Array:
  return lt_p.bind(x, y)
def convert_element_type(operand: Array, new_dtype: DType) -> Array:
  """Elementwise, value-preserving cast of ``operand`` to ``new_dtype``.

  Returns ``operand`` unchanged when the canonicalized dtypes already match.
  Casting complex values to a real dtype drops the imaginary part and emits
  a ``ComplexWarning``.
  """
  new_dtype = dtypes.canonicalize_dtype(new_dtype)
  # Python scalars are weakly typed: materialize them directly at the target
  # dtype instead of going through a primitive bind.
  if type(operand) in dtypes.python_scalar_dtypes:
    operand = onp.asarray(operand, new_dtype)
  old_dtype = dtypes.canonicalize_dtype(_dtype(operand))
  if old_dtype == new_dtype:
    return operand
  if (dtypes.issubdtype(old_dtype, onp.complexfloating) and
      not dtypes.issubdtype(new_dtype, onp.complexfloating)):
    msg = "Casting complex values to real discards the imaginary part"
    warnings.warn(msg, onp.ComplexWarning, stacklevel=2)
  return convert_element_type_p.bind(
      operand, new_dtype=new_dtype, old_dtype=old_dtype)
def bitcast_convert_type(operand: Array, new_dtype: DType) -> Array:
  """Reinterpret the raw bits of ``operand`` as ``new_dtype``.

  A no-op when the (canonicalized) target dtype equals the current dtype.
  """
  new_dtype = dtypes.canonicalize_dtype(new_dtype)
  if _dtype(operand) == new_dtype:
    return operand
  return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype)
def clamp(min: Array, x: Array, max: Array) -> Array:
return clamp_p.bind(min, x, max)
def concatenate(operands: Sequence[Array], dimension: int) -> Array:
return concatenate_p.bind(*operands, dimension=dimension)
Precision = xla_client.PrecisionConfig.Precision
Precision.__str__ = lambda precision: precision.name
PrecisionType = Any
class ConvDimensionNumbers(NamedTuple):
lhs_spec: Sequence[int]
rhs_spec: Sequence[int]
out_spec: Sequence[int]
ConvGeneralDilatedDimensionNumbers = Union[
None, ConvDimensionNumbers, Tuple[str, str, str]]
def conv_general_dilated(
lhs: Array, rhs: Array, window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
lhs_dilation: Optional[Sequence[int]] = None,
rhs_dilation: Optional[Sequence[int]] = None,
dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
feature_group_count: int = 1, batch_group_count: int = 1,
precision: Optional[PrecisionType] = None) -> Array:
dnums: ConvDimensionNumbers
dnums = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
if lhs_dilation is None:
lhs_dilation = (1,) * (lhs.ndim - 2)
elif isinstance(padding, str) and not len(lhs_dilation) == lhs_dilation.count(1):
raise ValueError(
"String padding is not implemented for transposed convolution "
"using this op. Please either exactly specify the required padding or "
"use conv_transpose.")
if rhs_dilation is None:
rhs_dilation = (1,) * (rhs.ndim - 2)
if isinstance(padding, str):
lhs_perm, rhs_perm, _ = dnums
rhs_shape = onp.take(rhs.shape, rhs_perm)[2:]
effective_rhs_shape = [(k-1) * r + 1 for k, r in zip(rhs_shape, rhs_dilation)]
padding = padtype_to_pads(
onp.take(lhs.shape, lhs_perm)[2:], effective_rhs_shape,
window_strides, padding)
return conv_general_dilated_p.bind(
lhs, rhs, window_strides=tuple(window_strides), padding=tuple(padding),
lhs_dilation=tuple(lhs_dilation), rhs_dilation=tuple(rhs_dilation),
dimension_numbers=dnums,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
lhs_shape=lhs.shape, rhs_shape=rhs.shape,
precision=_canonicalize_precision(precision))
def dot(lhs: Array, rhs: Array, precision: Optional[PrecisionType] = None) -> Array:
  """Vector/vector, matrix/vector or matrix/matrix product.

  Thin sugar over ``dot_general``: contracts the last axis of ``lhs`` with
  the first axis of ``rhs``; both operands must be rank 1 or 2.
  """
  lhs_ok = 1 <= lhs.ndim <= 2
  rhs_ok = 1 <= rhs.ndim <= 2
  if lhs_ok and rhs_ok and lhs.shape[-1] == rhs.shape[0]:
    dimension_numbers = (((lhs.ndim - 1,), (0,)), ((), ()))
    return dot_general(lhs, rhs, dimension_numbers, precision=precision)
  raise TypeError("Incompatible shapes for dot: got {} and {}.".format(
      lhs.shape, rhs.shape))
DotDimensionNumbers = Tuple[Tuple[Sequence[int], Sequence[int]],
Tuple[Sequence[int], Sequence[int]]]
def dot_general(lhs: Array, rhs: Array, dimension_numbers: DotDimensionNumbers,
precision: Optional[PrecisionType] = None) -> Array:
contract_dims_seq, batch_dims_seq = dimension_numbers
contract_dims = tuple(map(lambda x: tuple(x), contract_dims_seq))
batch_dims = tuple(map(lambda x: tuple(x), batch_dims_seq))
if not dtypes.issubdtype(lhs.dtype, onp.inexact):
# sum of products instead.
lhs_contract_dims, rhs_contract_dims = contract_dims
lhs_batch_dims, rhs_batch_dims = batch_dims
lhs_noncontract_dims = tuple(sorted(
set(range(onp.ndim(lhs))) - set(lhs_batch_dims) - set(lhs_contract_dims)))
rhs_noncontract_dims = tuple(sorted(
set(range(onp.ndim(rhs))) - set(rhs_batch_dims) - set(rhs_contract_dims)))
lhs = transpose(lhs,
lhs_batch_dims + lhs_noncontract_dims + lhs_contract_dims)
rhs = transpose(rhs,
rhs_batch_dims + rhs_noncontract_dims + rhs_contract_dims)
new_lhs_shape = onp.insert(onp.array(onp.shape(lhs), dtype=onp.int64),
len(lhs_batch_dims) + len(lhs_noncontract_dims),
(1,) * len(rhs_noncontract_dims))
new_rhs_shape = onp.insert(onp.array(onp.shape(rhs), dtype=onp.int64),
len(lhs_batch_dims),
(1,) * len(lhs_noncontract_dims))
lhs = reshape(lhs, new_lhs_shape)
rhs = reshape(rhs, new_rhs_shape)
out_ndim = (len(lhs_batch_dims) + len(lhs_noncontract_dims) +
len(rhs_noncontract_dims))
op_product = bitwise_and if lhs.dtype == onp.bool_ else mul
op_sum = bitwise_or if lhs.dtype == onp.bool_ else add
return reduce(op_product(lhs, rhs), _zero(lhs), op_sum,
tuple(range(out_ndim, out_ndim + len(lhs_contract_dims))))
return dot_general_p.bind(lhs, rhs,
dimension_numbers=(contract_dims, batch_dims),
precision=_canonicalize_precision(precision))
def broadcast(operand: Array, sizes: Sequence[int]) -> Array:
dims = tuple(range(len(sizes), len(sizes) + onp.ndim(operand)))
return broadcast_in_dim(operand, tuple(sizes) + onp.shape(operand), dims)
def broadcast_in_dim(operand: Array, shape: Shape,
broadcast_dimensions: Sequence[int]) -> Array:
shape = _broadcast_in_dim_shape_rule(
operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
if onp.ndim(operand) == len(shape) and not len(broadcast_dimensions):
return operand
return broadcast_in_dim_p.bind(
operand, shape=tuple(shape),
broadcast_dimensions=tuple(broadcast_dimensions))
def broadcast_to_rank(x: Array, rank: int) -> Array:
return broadcast(x, (1,) * (rank - x.ndim))
def reshape(operand: Array, new_sizes: Shape,
dimensions: Optional[Sequence[int]] = None) -> Array:
new_sizes = canonicalize_shape(new_sizes) # TODO
new_sizes = tuple(new_sizes)
same_shape = onp.shape(operand) == new_sizes
same_dims = dimensions is None or tuple(dimensions) == tuple(range(onp.ndim(operand)))
if onp.shape(operand) and same_shape and same_dims:
return operand
else:
return reshape_p.bind(
operand, new_sizes=new_sizes,
dimensions=None if dimensions is None or same_dims else tuple(dimensions))
def pad(operand: Array, padding_value: Array,
padding_config: Sequence[Tuple[int, int, int]]) -> Array:
return pad_p.bind(operand, padding_value, padding_config=tuple(padding_config))
def rev(operand: Array, dimensions: Sequence[int]) -> Array:
return rev_p.bind(operand, dimensions=tuple(dimensions))
def select(pred: Array, on_true: Array, on_false: Array) -> Array:
return select_p.bind(pred, on_true, on_false)
def slice(operand: Array, start_indices: Sequence[int],
limit_indices: Sequence[int],
strides: Optional[Sequence[int]] = None) -> Array:
if (onp.all(onp.equal(start_indices, 0))
and onp.all(onp.equal(limit_indices, operand.shape))
and strides is None):
return operand
else:
return slice_p.bind(operand, start_indices=tuple(start_indices),
limit_indices=tuple(limit_indices),
strides=None if strides is None else tuple(strides))
def dynamic_slice(operand: Array, start_indices: Sequence[Array],
slice_sizes: Shape) -> Array:
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_slice_p.bind(operand, *start_indices,
slice_sizes=tuple(slice_sizes))
def dynamic_update_slice(operand: Array, update: Array,
start_indices: Array) -> Array:
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_update_slice_p.bind(operand, update, *start_indices)
class GatherDimensionNumbers(NamedTuple):
offset_dims: Sequence[int]
collapsed_slice_dims: Sequence[int]
start_index_map: Sequence[int]
def gather(operand: Array, start_indices: Array,
dimension_numbers: GatherDimensionNumbers,
slice_sizes: Shape) -> Array:
return gather_p.bind(
operand, start_indices, dimension_numbers=dimension_numbers,
slice_sizes=canonicalize_shape(slice_sizes))
class ScatterDimensionNumbers(NamedTuple):
update_window_dims: Sequence[int]
inserted_window_dims: Sequence[int]
scatter_dims_to_operand_dims: Sequence[int]
def scatter_add(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers) -> Array:
jaxpr, consts = _reduction_jaxpr(add, _abstractify(_const(operand, 0)))
return scatter_add_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers)
def scatter_mul(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers) -> Array:
jaxpr, consts = _reduction_jaxpr(mul, _abstractify(_const(operand, 1)))
return scatter_mul_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers)
def scatter_min(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers) -> Array:
jaxpr, consts = _reduction_jaxpr(min, _abstractify(_const(operand, 0)))
return scatter_min_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers)
def scatter_max(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers) -> Array:
jaxpr, consts = _reduction_jaxpr(max, _abstractify(_const(operand, 0)))
return scatter_max_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers)
# Define this outside of scatter to ensure cache hits.
_scatter_reduction_computation = lambda x, y: y
def scatter(operand: Array, scatter_indices:Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers) -> Array:
jaxpr, consts = _reduction_jaxpr(_scatter_reduction_computation,
_abstractify(_const(operand, 0)))
return scatter_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers)
def index_take(src: Array, idxs: Array, axes: Sequence[int]) -> Array:
indices = concatenate([reshape(i, [i.shape[0], 1]) for i in idxs], 1)
indices = indices % onp.array([src.shape[ax] for ax in axes])
slice_sizes = list(src.shape)
for ax in axes:
slice_sizes[ax] = 1
offset_dims = tuple(range(1, src.ndim - indices.shape[1] + 1))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=axes,
start_index_map=axes)
return gather(src, indices, dimension_numbers=dnums,
slice_sizes=tuple(slice_sizes))
def transpose(operand: Array, permutation: Sequence[int]) -> Array:
  """Permute the axes of ``operand``; identity permutations are a no-op."""
  perm = tuple(permutation)
  identity = tuple(range(len(perm)))
  if perm == identity:
    return operand
  return transpose_p.bind(operand, permutation=perm)
def reduce(operand: Array, init_value: Array, computation: Callable,
dimensions: Sequence[int]) -> Array:
monoid_reducer = _get_monoid_reducer(computation, init_value)
if monoid_reducer:
return monoid_reducer(operand, dimensions)
else:
jaxpr, consts = _reduction_jaxpr(computation, _abstractify(init_value))
return reduce_p.bind(operand, init_value, computation=computation,
jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions))
@cache()
def _reduction_jaxpr(computation, aval):
pval = pe.PartialVal.unknown(aval)
comp = lu.wrap_init(lambda x, y: (computation(x, y),))
jaxpr, _, consts = pe.trace_to_jaxpr(comp, (pval, pval), instantiate=False)
return jaxpr, consts
def _get_monoid_reducer(monoid_op: Callable, x: Array) -> Optional[Callable]:
aval = core.get_aval(x)
dtype = _dtype(x)
if (type(aval) is ConcreteArray) and aval.shape == ():
if monoid_op is add:
return aval.val == 0 and _reduce_sum
if monoid_op is mul:
return aval.val == 1 and _reduce_prod
elif monoid_op is bitwise_or and dtype == onp.bool_:
return aval.val == _get_max_identity(dtype) and _reduce_or
elif monoid_op is bitwise_and and dtype == onp.bool_:
return aval.val == _get_min_identity(dtype) and _reduce_and
elif monoid_op is max:
return aval.val == _get_max_identity(dtype) and _reduce_max
elif monoid_op is min:
return aval.val == _get_min_identity(dtype) and _reduce_min
return None
def _get_max_identity(dtype: DType) -> Array:
  """Identity element for `max`: -inf / integer min / False.

  NOTE(review): implicitly returns None for dtypes that are none of
  inexact/integer/bool — confirm callers only pass those kinds.
  """
  if dtypes.issubdtype(dtype, onp.inexact):
    return onp.array(-onp.inf, dtype)
  elif dtypes.issubdtype(dtype, onp.integer):
    return onp.array(dtypes.iinfo(dtype).min, dtype)
  elif dtypes.issubdtype(dtype, onp.bool_):
    return onp.array(False, onp.bool_)
def _get_min_identity(dtype: DType) -> Array:
  """Identity element for `min`: +inf / integer max / True.

  NOTE(review): same silent None fall-through as `_get_max_identity`.
  """
  if dtypes.issubdtype(dtype, onp.inexact):
    return onp.array(onp.inf, dtype)
  elif dtypes.issubdtype(dtype, onp.integer):
    return onp.array(dtypes.iinfo(dtype).max, dtype)
  elif dtypes.issubdtype(dtype, onp.bool_):
    return onp.array(True, onp.bool_)
def _reduce_sum(operand: Array, axes: Sequence[int]) -> Array:
return reduce_sum_p.bind(operand, axes=tuple(axes))
def _reduce_prod(operand: Array, axes: Sequence[int]) -> Array:
return reduce_prod_p.bind(operand, axes=tuple(axes))
def _reduce_max(operand: Array, axes: Sequence[int]) -> Array:
return reduce_max_p.bind(operand, axes=tuple(axes))
def _reduce_min(operand: Array, axes: Sequence[int]) -> Array:
return reduce_min_p.bind(operand, axes=tuple(axes))
def _reduce_or(operand: Array, axes: Sequence[int]) -> Array:
return reduce_or_p.bind(operand, axes=tuple(axes))
def _reduce_and(operand: Array, axes: Sequence[int]) -> Array:
return reduce_and_p.bind(operand, axes=tuple(axes))
def reduce_window(operand: Array, init_value: Array, computation: Callable,
window_dimensions: Shape, window_strides: Sequence[int],
padding: str) -> Array:
monoid_reducer = _get_monoid_window_reducer(computation, init_value)
if monoid_reducer:
return monoid_reducer(operand, window_dimensions, window_strides, padding)
else:
jaxpr, consts = _reduction_jaxpr(computation, _abstractify(init_value))
return reduce_window_p.bind(
operand, init_value, jaxpr=jaxpr, consts=consts,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _get_monoid_window_reducer(monoid_op: Callable, x: Array) -> Optional[Callable]:
aval = core.get_aval(x)
if (type(aval) is ConcreteArray) and aval.shape == ():
if monoid_op is add:
return aval.val == 0 and _reduce_window_sum
elif monoid_op is max:
return aval.val == _get_max_identity(aval.dtype) and _reduce_window_max
elif monoid_op is min:
return aval.val == _get_min_identity(aval.dtype) and _reduce_window_min
return None
def _reduce_window_sum(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int], padding: str) -> Array:
return reduce_window_sum_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _reduce_window_prod(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int], padding: str) -> Array:
init_value = _const(operand, 1)
jaxpr, consts = _reduction_jaxpr(mul, _abstractify(init_value))
return reduce_window_p.bind(
operand, init_value, jaxpr=jaxpr, consts=consts,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _reduce_window_max(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int], padding: str) -> Array:
return reduce_window_max_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _reduce_window_min(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int], padding: str) -> Array:
return reduce_window_min_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _select_and_scatter(operand: Array, select: Callable,
window_dimensions: Shape, window_strides: Sequence[int],
padding: str, source: Array, init_value: Array,
scatter: Callable) -> Array:
select_jaxpr, select_consts = _reduction_jaxpr(select, _abstractify(init_value))
scatter_jaxpr, scatter_consts = _reduction_jaxpr(scatter, _abstractify(init_value))
return select_and_scatter_p.bind(
operand, source, init_value, select_jaxpr=select_jaxpr,
select_consts=select_consts, scatter_jaxpr=scatter_jaxpr,
scatter_consts=scatter_consts, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _select_and_scatter_add(source: Array, operand: Array,
select_prim: core.Primitive,
window_dimensions: Shape,
window_strides: Sequence[int],
padding: str) -> Array:
return select_and_scatter_add_p.bind(
source, operand, select_prim=select_prim,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def _select_and_gather_add(tangents: Array, operand: Array,
select_prim: core.Primitive,
window_dimensions: Shape,
window_strides: Sequence[int],
padding: str) -> Array:
return select_and_gather_add_p.bind(
tangents, operand, select_prim=select_prim,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding)
def cumsum(operand: Array, axis: int) -> Array:
return cumsum_p.bind(operand, axis=int(axis))
def cumprod(operand: Array, axis: int) -> Array:
return cumprod_p.bind(operand, axis=int(axis))
def sort(operand: Union[Array, Tuple[Array, ...]], dimension: int = -1
) -> Union[Array, Tuple[Array, ...]]:
if isinstance(operand, tuple):
if len(operand) == 0:
raise TypeError("Sort requires at least one operand")
dimension = _canonicalize_axis(dimension, len(operand[0].shape))
return tuple(sort_p.bind(*operand, dimension=dimension))
else:
dimension = _canonicalize_axis(dimension, len(operand.shape))
return sort_p.bind(operand, dimension=dimension)[0]
def sort_key_val(keys: Array, values: Array,
dimension: int = -1) -> Tuple[Array, Array]:
dimension = _canonicalize_axis(dimension, len(keys.shape))
k, v = sort_p.bind(keys, values, dimension=dimension)
return k, v
def top_k(operand: Array, k: int) -> Tuple[Array, Array]:
k = int(k)
if k < 0:
raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
return top_k_p.bind(operand, k=k)
def tie_in(x: Array, y: Array) -> Array:
return tie_in_p.bind(x, y)
def full(shape: Shape, fill_value: Array, dtype: Optional[DType] = None) -> Array:
shape = canonicalize_shape(shape)
if onp.shape(fill_value):
msg = "full must be called with scalar fill_value, got fill_value.shape {}."
raise TypeError(msg.format(onp.shape(fill_value)))
dtype = dtypes.canonicalize_dtype(dtype or _dtype(fill_value))
# TODO(mattjj): remove device_put when dtype conversion produces DeviceArray
fill_value = xla.device_put_p.bind(convert_element_type(fill_value, dtype))
return broadcast(fill_value, shape)
def iota(dtype: DType, size: int) -> Array:
size = size if type(size) is masking.Poly else int(size)
shape = canonicalize_shape((size,))
dtype = dtypes.canonicalize_dtype(dtype)
lazy_expr = lazy.iota(dtype, shape[0])
aval = ShapedArray(shape, dtype)
return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def broadcasted_iota(dtype: DType, shape: Shape, dimension: int) -> Array:
dtype = dtypes.canonicalize_dtype(dtype)
shape = canonicalize_shape(shape)
dimension = int(dimension)
return broadcast_in_dim(iota(dtype, shape[dimension]), shape, [dimension])
def _eye(dtype: DType, shape: Shape, offset: int) -> Array:
N, M = tuple(map(int, shape))
offset = int(offset)
dtype = dtypes.canonicalize_dtype(dtype)
lazy_expr = lazy.eye(dtype, (N, M), offset)
aval = ShapedArray((N, M), dtype)
return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def _delta(dtype: DType, shape: Shape, axes: Sequence[int]) -> Array:
shape = tuple(map(int, shape))
axes = tuple(map(int, axes))
dtype = dtypes.canonicalize_dtype(dtype)
base_shape = tuple(onp.take(shape, axes))
lazy_expr = lazy.broadcast(lazy.delta(dtype, base_shape), shape, axes)
aval = ShapedArray(shape, dtype)
return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def _tri(dtype: DType, shape: Shape, offset: int) -> Array:
N, M = tuple(map(int, shape))
offset = int(offset)
dtype = dtypes.canonicalize_dtype(dtype)
lazy_expr = lazy.tri(dtype, (N, M), offset)
aval = ShapedArray((N, M), dtype)
return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
def stop_gradient(x):
return tree_map(ad_util.stop_gradient_p.bind, x)
### convenience wrappers around traceables
def conv(lhs: Array, rhs: Array, window_strides: Sequence[int],
         padding: str, precision: Optional[PrecisionType] = None) -> Array:
  """Convenience wrapper around ``conv_general_dilated`` with no dilation.

  Args:
    lhs: rank-n+2 input array ("NC..." layout).
    rhs: rank-n+2 kernel array.
    window_strides: per-spatial-dimension strides.
    padding: a padtype string, 'SAME' or 'VALID'.
    precision: optional backend precision hint.
  """
  # Removed a dead `pads = padtype_to_pads(...)` local: conv_general_dilated
  # accepts the padtype string directly and resolves it to explicit pads
  # itself, so precomputing here was unused work.
  return conv_general_dilated(lhs, rhs, window_strides, padding,
                              precision=precision)
def conv_with_general_padding(lhs: Array, rhs: Array,
window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
lhs_dilation: Optional[Sequence[int]],
rhs_dilation: Optional[Sequence[int]],
precision: Optional[PrecisionType] = None) -> Array:
return conv_general_dilated(
lhs, rhs, window_strides, padding, lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation, precision=precision)
def _conv_transpose_padding(k, s, padding):
if padding == 'SAME':
pad_len = k + s - 2
if s > k - 1:
pad_a = k - 1
else:
pad_a = int(onp.ceil(pad_len / 2))
elif padding == 'VALID':
pad_len = k + s - 2 + _max(k - s, 0)
pad_a = k - 1
else:
raise ValueError('Padding mode must be `SAME` or `VALID`.')
pad_b = pad_len - pad_a
return pad_a, pad_b
def _flip_axes(x, axes):
for axis in axes:
x = onp.flip(x, axis)
return x
def conv_transpose(lhs: Array, rhs: Array, strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
rhs_dilation: Optional[Sequence[int]] = None,
dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
transpose_kernel: bool = False,
precision: Optional[PrecisionType] = None) -> Array:
assert len(lhs.shape) == len(rhs.shape) and len(lhs.shape) > 2
ndims = len(lhs.shape)
one = (1,) * (ndims - 2)
# Set dimensional layout defaults if not specified.
if dimension_numbers is None:
if ndims == 3:
dimension_numbers = ('NHC', 'HIO', 'NHC')
elif ndims == 4:
dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
elif ndims == 5:
dimension_numbers = ('NHWDC', 'HWDIO', 'NHWDC')
else:
raise ValueError('No 4+ dimensional dimension_number defaults.')
dn = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
k_shape = onp.take(rhs.shape, dn.rhs_spec)
k_sdims = k_shape[2:]
# Calculate correct output shape given padding and strides.
pads: Union[str, Sequence[Tuple[int, int]]]
if padding in {'SAME', 'VALID'}:
if rhs_dilation is None:
rhs_dilation = (1,) * (rhs.ndim - 2)
effective_k_size = map(lambda k, r: (k-1) * r + 1, k_sdims, rhs_dilation)
pads = [_conv_transpose_padding(k, s, padding)
for k,s in zip(effective_k_size, strides)]
else:
pads = padding
if transpose_kernel:
# flip spatial dims and swap input / output channel axes
rhs = _flip_axes(rhs, onp.array(dn.rhs_spec)[2:])
rhs = onp.swapaxes(rhs, dn.rhs_spec[0], dn.rhs_spec[1])
return conv_general_dilated(lhs, rhs, one, pads, strides, rhs_dilation, dn,
precision=precision)
def full_like(x: Array, fill_value: Array, dtype: Optional[DType] = None,
shape: Optional[Shape] = None) -> Array:
fill_shape = onp.shape(x) if shape is None else canonicalize_shape(shape)
fill_value = tie_in(x, fill_value)
return full(fill_shape, fill_value, dtype or _dtype(x))
def collapse(operand: Array, start_dimension: int, stop_dimension: int) -> Array:
lo, hi = start_dimension, stop_dimension
size = prod(operand.shape[lo:hi])
new_shape = operand.shape[:lo] + (size,) + operand.shape[hi:]
return reshape(operand, new_shape)
def slice_in_dim(operand: Array, start_index: Optional[int],
limit_index: Optional[int],
stride: int = 1, axis: int = 0)-> Array:
start_indices = [0] * operand.ndim
limit_indices = list(operand.shape)
strides = [1] * operand.ndim
# translate `None`
len_axis = operand.shape[axis]
start_index_int = int(start_index) if start_index is not None else 0
limit_index_int = int(limit_index) if limit_index is not None else len_axis
# translate negative indices
if start_index_int < 0:
start_index_int = start_index_int + len_axis
if limit_index_int < 0:
limit_index_int = limit_index_int + len_axis
axis = int(axis)
start_indices[axis] = start_index_int
limit_indices[axis] = limit_index_int
strides[axis] = int(stride)
return slice(operand, start_indices, limit_indices, strides)
def index_in_dim(operand: Array, index: int, axis: int = 0,
keepdims: bool = True) -> Array:
index, axis = int(index), int(axis)
axis_size = operand.shape[axis]
wrapped_index = index + axis_size if index < 0 else index
if not 0 <= wrapped_index < axis_size:
msg = 'index {} is out of bounds for axis {} with size {}'
raise IndexError(msg.format(index, axis, axis_size))
result = slice_in_dim(operand, wrapped_index, wrapped_index + 1, 1, axis)
if keepdims:
return result
else:
return reshape(result, onp.delete(operand.shape, axis))
def dynamic_slice_in_dim(operand: Array, start_index: Array,
slice_size: int, axis: int = 0) -> Array:
start_indices = [_zero(start_index)] * operand.ndim
slice_sizes = list(operand.shape)
axis = int(axis)
start_indices[axis] = start_index
slice_sizes[axis] = int(slice_size)
return dynamic_slice(operand, start_indices, slice_sizes)
def dynamic_index_in_dim(operand: Array, index: Array, axis: int = 0,
keepdims: bool = True) -> Array:
result = dynamic_slice_in_dim(operand, index, 1, axis)
if keepdims:
return result
else:
return reshape(result, onp.delete(operand.shape, axis))
def dynamic_update_slice_in_dim(operand: Array, update: Array,
start_index: Array, axis: int) -> Array:
axis = int(axis)
start_indices = [_zero(start_index)] * _ndim(operand)
start_indices[axis] = start_index
return dynamic_update_slice(operand, update, start_indices)
def dynamic_update_index_in_dim(operand: Array, update: Array, index: Array,
axis: int) -> Array:
axis = int(axis)
if _ndim(update) != _ndim(operand):
assert _ndim(update) + 1 == _ndim(operand)
ax = axis % _ndim(operand)
update = reshape(update, operand.shape[:ax] + (1,) + operand.shape[ax+1:])
return dynamic_update_slice_in_dim(operand, update, index, axis)
def batch_matmul(lhs: Array, rhs: Array,
                 precision: Optional[PrecisionType] = None) -> Array:
  """Batched matrix multiplication over the trailing two dims of the inputs.

  All leading dimensions are treated as batch dimensions; both operands must
  share the same rank, which must be at least 2.
  """
  # NOTE(review): `_min`/`_max` are module-level builtin aliases — verify
  # `_min` is bound to builtins.min, or this rank check never fires.
  if _min(lhs.ndim, rhs.ndim) < 2:
    raise ValueError('Arguments to batch_matmul must be at least 2D, got {}, {}'
                     .format(lhs.ndim, rhs.ndim))
  if lhs.ndim != rhs.ndim:
    raise ValueError('Arguments to batch_matmul must have same ndim, got {}, {}'
                     .format(lhs.ndim, rhs.ndim))
  # Contract lhs's last axis against rhs's second-to-last; batch the rest.
  lhs_contract = (lhs.ndim - 1,)
  rhs_contract = (rhs.ndim - 2,)
  batch = tuple(range(lhs.ndim - 2))
  return dot_general(lhs, rhs, ((lhs_contract, rhs_contract), (batch, batch)),
                     precision=precision)
# These functions also exist in the XLA client library, but we treat them
# as non-primitive to maintain a smaller set of autodiff primitives.
def square(x: Array) -> Array:
return integer_pow(x, 2)
def reciprocal(x: Array) -> Array:
return integer_pow(x, -1)
def _upcast_fp16_for_computation(f):
  """Decorator: evaluate ``f`` in float32 for half-precision inputs.

  float16/bfloat16 inputs are upcast to float32, ``f`` is applied, and the
  result is cast back to the original dtype; all other dtypes pass through
  unchanged.
  """
  @functools.wraps(f)
  def f_wrapped(x):
    dtype = _dtype(x)
    if dtype == onp.float16 or dtype == dtypes.bfloat16:
      return convert_element_type(
          f(convert_element_type(x, onp.float32)), dtype)
    return f(x)
  return f_wrapped
@api.jit
@_upcast_fp16_for_computation
def tan(x: Array) -> Array:
  r"""Elementwise tangent, computed as sin(x)/cos(x) in (at least) float32."""
  return div(sin(x), cos(x))
@api.jit
def asin(x: Array) -> Array:
  r"""Elementwise arc sine, via the identity asin(x) = 2*atan2(x, 1+sqrt(1-x^2))."""
  return mul(_const(x, 2),
             atan2(x, add(_const(x, 1), sqrt(sub(_const(x, 1), square(x))))))
@api.jit
def acos(x: Array) -> Array:
  r"""Elementwise arc cosine via atan2; x == -1 is special-cased to pi."""
  return select(
      ne(x, _const(x, -1.0)),
      mul(_const(x, 2),
          atan2(sqrt(sub(_const(x, 1), square(x))), add(_const(x, 1), x))),
      full_like(x, onp.pi))
def atan(x: Array) -> Array:
  r"""Elementwise arc tangent: atan2(x, 1)."""
  return atan2(x, _const(x, 1))
def sinh(x: Array) -> Array:
  r"""Elementwise hyperbolic sine (binds the sinh primitive)."""
  return sinh_p.bind(x)
def cosh(x: Array) -> Array:
  r"""Elementwise hyperbolic cosine (binds the cosh primitive)."""
  return cosh_p.bind(x)
def asinh(x: Array) -> Array:
  r"""Elementwise inverse hyperbolic sine (binds the asinh primitive)."""
  return asinh_p.bind(x)
def acosh(x: Array) -> Array:
  r"""Elementwise inverse hyperbolic cosine (binds the acosh primitive)."""
  return acosh_p.bind(x)
def atanh(x: Array) -> Array:
  r"""Elementwise inverse hyperbolic tangent (binds the atanh primitive)."""
  return atanh_p.bind(x)
# Add some methods to ShapedArray that rely on lax primitives
ShapedArray.broadcast = core.aval_method(broadcast)
ShapedArray.transpose = core.aval_method(transpose)  # clobbered by lax_numpy
ShapedArray.reshape = core.aval_method(reshape)  # clobbered by lax_numpy
def _iter(tracer):
  """Iterate over the leading axis of a traced array, yielding index slices."""
  if tracer.ndim == 0:
    raise TypeError("iteration over a 0-d array")  # same as numpy error
  else:
    n = tracer.shape[0]
    # Eagerly materialize the slices instead of returning a lazy generator:
    # return (index_in_dim(tracer, i, keepdims=False) for i in range(n))
    return iter([index_in_dim(tracer, i, keepdims=False) for i in range(n)])
ShapedArray._iter = staticmethod(_iter)
# Add some ad handlers that use (or could use) lax primitives
def zeros_like_array(x):
  """Zero cotangent with the shape/dtype of `x`."""
  return full_like(x, 0)
# Register cotangent addition and zero construction for every concrete
# array-like type autodiff may encounter.
for t in itertools.chain(dtypes.python_scalar_dtypes.keys(), array_types,
                         [xla.DeviceArray, pxla.ShardedDeviceArray]):
  ad_util.jaxval_adders[t] = add
ad_util.jaxval_zeros_likers[xla.DeviceArray] = zeros_like_array
ad_util.jaxval_zeros_likers[pxla.ShardedDeviceArray] = zeros_like_array
### primitives
# Dtype-rule helpers: take the (canonicalized) dtype of the first argument,
# always return a fixed dtype, or map a complex dtype to its real base type.
_input_dtype = lambda *args, **_: dtypes.canonicalize_dtype(args[0].dtype)
_fixed_dtype = lambda dtype: lambda *args, **kwargs: dtypes.canonicalize_dtype(dtype)
_complex_basetype = lambda dtype: onp.abs(onp.zeros((), dtype)).dtype
def standard_primitive(shape_rule, dtype_rule, name, translation_rule=None):
  """Create a Primitive wired with the standard impl, abstract eval, and XLA
  translation (defaulting to the CamelCase xops builder for `name`)."""
  prim = Primitive(name)
  prim.def_impl(partial(xla.apply_primitive, prim))
  prim.def_abstract_eval(partial(standard_abstract_eval, prim, shape_rule, dtype_rule))
  xla.translations[prim] = translation_rule or partial(standard_translate, name)
  return prim
def standard_abstract_eval(prim, shape_rule, dtype_rule, *args, **kwargs):
  """Abstract evaluation at the least-specialized abstraction level of `args`.

  If every argument is concrete, evaluate eagerly via the primitive's impl;
  if shaped, apply the shape and dtype rules; if unshaped, propagate dtype only.
  """
  assert all(isinstance(arg, UnshapedArray) for arg in args), args
  least_specialized = _max(
    map(type, args), key=operator.attrgetter('array_abstraction_level'))
  if least_specialized is ConcreteArray:
    return ConcreteArray(prim.impl(*[x.val for x in args], **kwargs))
  elif least_specialized is ShapedArray:
    return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
  elif least_specialized is UnshapedArray:
    return UnshapedArray(dtype_rule(*args, **kwargs))
  else:
    raise TypeError(args, least_specialized)
def standard_translate(name, c, *args, **kwargs):
  """Default XLA translation: dispatch to the CamelCase xops builder for `name`."""
  camel_name = ''.join(part.capitalize() for part in name.split('_'))
  builder = getattr(xops, camel_name)
  return builder(*args, **kwargs)
def unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, **kwargs):
  """Validate a unary op's input dtype and compute its result dtype.

  Raises TypeError when `aval.dtype` is not a subtype of any entry in
  `accepted_dtypes`; otherwise applies `result_dtype` to the input dtype.
  """
  accepted = any(dtypes.issubdtype(aval.dtype, t) for t in accepted_dtypes)
  if not accepted:
    msg = '{} does not accept dtype {}. Accepted dtypes are subtypes of {}.'
    typename = str(onp.dtype(aval.dtype).name)
    accepted_typenames = (t.__name__ for t in accepted_dtypes)
    raise TypeError(msg.format(name, typename, ', '.join(accepted_typenames)))
  return result_dtype(aval.dtype)
def unop(result_dtype, accepted_dtypes, name, translation_rule=None):
  """Define a standard elementwise (shape-preserving) unary primitive."""
  dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name)
  prim = standard_primitive(_attrgetter('shape'), dtype_rule, name,
                            translation_rule=translation_rule)
  batching.defvectorized(prim)
  masking.defvectorized(prim)
  return prim
# Unary op whose output dtype equals its input dtype.
standard_unop = partial(unop, _identity)
# Attribute getter that tolerates keyword params (used as a shape rule above).
_attrgetter = lambda name: lambda x, **kwargs: getattr(x, name)
def naryop_dtype_rule(result_dtype, accepted_dtypes, name, *avals, **kwargs):
  """Validate each operand's dtype for an n-ary op; compute the result dtype.

  Operand i must be a subtype of one of `accepted_dtypes[i]`, and all operand
  dtypes must agree (checked via _check_same_dtypes).
  """
  aval_dtypes = [aval.dtype for aval in avals]
  for i, (aval_dtype, types) in enumerate(zip(aval_dtypes, accepted_dtypes)):
    if not any(dtypes.issubdtype(aval_dtype, t) for t in types):
      msg = ('{} does not accept dtype {} at position {}. '
             'Accepted dtypes at position {} are subtypes of {}.')
      typename = str(onp.dtype(aval_dtype).name)
      typenames = ', '.join(t.__name__ for t in types)
      raise TypeError(msg.format(name, typename, i, i, typenames))
  _check_same_dtypes(name, False, *aval_dtypes)
  return result_dtype(*avals)
def _broadcasting_shape_rule(name, *avals):
shapes = onp.array([aval.shape for aval in avals if aval.shape])
if not shapes.size:
return ()
if len({len(shape) for shape in shapes}) != 1:
msg = '{} got arrays of different rank: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
is_zero = onp.any(shapes == 0, axis=0)
max_shape = onp.max(shapes, axis=0)
result_shape = onp.where(is_zero, 0, max_shape)
if not onp.all((shapes == result_shape) | (shapes == 1)):
msg = '{} got incompatible shapes for broadcasting: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
return tuple(result_shape)
def naryop(result_dtype, accepted_dtypes, name, translation_rule=None):
  """Define a standard n-ary elementwise primitive with broadcasting."""
  dtype_rule = partial(naryop_dtype_rule, result_dtype, accepted_dtypes, name)
  shape_rule = partial(_broadcasting_shape_rule, name)
  prim = standard_primitive(shape_rule, dtype_rule, name,
                            translation_rule=translation_rule)
  batching.defbroadcasting(prim)
  masking.defnaryop(prim)
  return prim
# N-ary op whose result dtype is the (canonicalized) dtype of the first operand.
standard_naryop = partial(naryop, _input_dtype)
def _broadcast_translate(translate: Callable):
  # Decorator for translation rules which adds explicit broadcasting of
  # positional arguments. This is necessary only for a handful of primitives
  # whose XLA implementations do not support broadcasting.
  def _broadcast_array(array, array_shape, result_shape):
    # Broadcast `array` up to `result_shape`, aligning trailing dimensions.
    if array_shape == result_shape:
      return array
    bcast_dims = tuple(range(len(result_shape) - len(array_shape),
                             len(result_shape)))
    result = xops.BroadcastInDim(array, result_shape, bcast_dims)
    return result
  def _broadcasted_translation_rule(c, *args, **kwargs):
    # Compute the common broadcast shape, expand every operand to it, then
    # call the wrapped translation rule on the uniformly-shaped args.
    shapes = [c.get_shape(arg).dimensions() for arg in args]
    result_shape = broadcast_shapes(*shapes)
    args = [_broadcast_array(arg, arg_shape, result_shape)
            for arg, arg_shape in zip(args, shapes)]
    return translate(c, *args, **kwargs)
  return _broadcasted_translation_rule
# NOTE(mattjj): this isn't great for forward mode because it bakes shape data
# into the JVP rules: we can't trace these ops without shape data.
def _brcast(x, *others):
  """Broadcast `x` against `others` (all assumed mutually compatible).

  Only non-scalar shapes participate; if `x` already has the common shape it
  is returned unchanged. We don't need full numpy broadcasting here.
  """
  nonscalar_shapes = tuple(filter(None, map(onp.shape, (x,) + others)))
  target = nonscalar_shapes and broadcast_shapes(*nonscalar_shapes)
  if onp.shape(x) == target:
    return x
  return _brcast_to(x, target)
def _brcast_to(x, shape):
  """Broadcast `x` to exactly `shape`; `x` is a scalar or has equal rank."""
  x_shape = onp.shape(x)
  assert x_shape != shape
  if x_shape:
    assert len(x_shape) == len(shape)
    # Keep dimensions that already match; squeeze out the size-1 dimensions
    # that must grow and let broadcast_in_dim reintroduce them at full size.
    broadcast_dimensions, = onp.where(onp.equal(x_shape, shape))
    squeezed_dimensions, = onp.where(onp.not_equal(x_shape, shape))
    inshape = onp.delete(x_shape, squeezed_dimensions)
    return broadcast_in_dim(reshape(x, inshape), shape, broadcast_dimensions)
  else:
    return broadcast(x, shape)
# Dtype-class sets used to declare which input dtypes each primitive accepts.
_float = {onp.floating}
_complex = {onp.complexfloating}
_complex_elem_types = {onp.float32, onp.float64}
_int = {onp.integer}
_bool = {onp.bool_}
_num = _int | _float | _complex
_any = _int | _float | _complex | _bool
_bool_or_int = _int | _bool
neg_p = standard_unop(_num, 'neg')
ad.deflinear(neg_p, lambda t: [neg(t)])  # negation is its own transpose (negated)
def _sign_translation_rule(c, x):
  """XLA translation for sign; unsigned ints get a manual (x == 0 ? 0 : 1)."""
  shape = c.get_shape(x)
  dtype = shape.numpy_dtype()
  if dtypes.issubdtype(dtype, onp.unsignedinteger):
    zero = xb.constant(c, onp.array(0, dtype=dtype))
    dims = c.get_shape(x).dimensions()
    return xops.Select(xops.Eq(x, zero), xops.Broadcast(zero, dims),
                       xops.Broadcast(xb.constant(c, onp.array(1, dtype=dtype)),
                                      dims))
  return xops.Sign(x)
sign_p = standard_unop(_num, 'sign', translation_rule=_sign_translation_rule)
ad.defjvp_zero(sign_p)  # piecewise constant, so zero derivative
nextafter_p = standard_naryop(
  [_float, _float], 'nextafter',
  translation_rule=lambda c, x1, x2: xops.NextAfter(x1, x2))
# Rounding-style ops are also piecewise constant: all have zero JVPs.
floor_p = standard_unop(_float, 'floor')
ad.defjvp_zero(floor_p)
ceil_p = standard_unop(_float, 'ceil')
ad.defjvp_zero(ceil_p)
round_p = standard_unop(_float, 'round')
ad.defjvp_zero(round_p)
is_finite_p = unop(_fixed_dtype(onp.bool_), _float, 'is_finite')
ad.defjvp_zero(is_finite_p)
# Elementwise transcendental primitives and their JVP rules.
exp_p = standard_unop(_float | _complex, 'exp')
ad.defjvp2(exp_p, lambda g, ans, x: mul(g, ans))  # d/dx exp(x) = exp(x) = ans
log_p = standard_unop(_float | _complex, 'log')
ad.defjvp(log_p, lambda g, x: div(g, x))
expm1_p = standard_unop(_float | _complex, 'expm1')
ad.defjvp2(expm1_p, lambda g, ans, x: mul(g, add(ans, _one(ans))))
log1p_p = standard_unop(_float | _complex, 'log1p')
ad.defjvp(log1p_p, lambda g, x: div(g, add(x, _one(x))))
tanh_p = standard_unop(_float | _complex, 'tanh')
ad.defjvp2(tanh_p, lambda g, ans, x: mul(g, sub(_one(x), mul(ans, ans))))
sin_p = standard_unop(_float | _complex, 'sin')
ad.defjvp(sin_p, lambda g, x: mul(g, cos(x)))
cos_p = standard_unop(_float | _complex, 'cos')
ad.defjvp(cos_p, lambda g, x: neg(mul(g, sin(x))))
atan2_p = standard_naryop([_float, _float], 'atan2')
ad.defjvp(atan2_p,
          lambda g, x, y: _brcast(g, y) * (y / (square(x) + square(y))),
          lambda g, x, y: _brcast(g, x) * -x / (square(x) + square(y)))
sinh_p = standard_unop(_float | _complex, 'sinh')
ad.defjvp(sinh_p, lambda g, x: mul(g, cosh(x)))
cosh_p = standard_unop(_float | _complex, 'cosh')
ad.defjvp(cosh_p, lambda g, x: mul(g, sinh(x)))
asinh_p = standard_unop(_float | _complex, 'asinh')
ad.defjvp(asinh_p, lambda g, x: mul(g, rsqrt(square(x) + _one(x))))
acosh_p = standard_unop(_float | _complex, 'acosh')
ad.defjvp(acosh_p,
          lambda g, x: mul(g, rsqrt((x - _one(x)) * (x + _one(x)))))
atanh_p = standard_unop(_float | _complex, 'atanh')
ad.defjvp(atanh_p,
          lambda g, x: mul(g, reciprocal((_one(x) - x) * (_one(x) + x))))
regularized_incomplete_beta_p = standard_naryop(
    [_float, _float, _float], 'regularized_incomplete_beta',
    translation_rule=_broadcast_translate(
      partial(standard_translate, 'regularized_incomplete_beta')))
def betainc_gradx(g, a, b, x):
  """JVP of the regularized incomplete beta I_x(a, b) with respect to x.

  d/dx I_x(a,b) is the beta density, computed in log space for stability.
  """
  lbeta = lgamma(a) + lgamma(b) - lgamma(a + b)
  partial_x = exp((b - 1) * log1p(-x) +
                  (a - 1) * log(x) - lbeta)
  return partial_x * g
def betainc_grad_not_implemented(g, a, b, x):
  """Placeholder JVP: derivatives with respect to a and b are unsupported."""
  raise ValueError("Betainc gradient with respect to a and b not supported.")
ad.defjvp(regularized_incomplete_beta_p,
          betainc_grad_not_implemented,
          betainc_grad_not_implemented,
          betainc_gradx)
lgamma_p = standard_unop(_float, 'lgamma')
ad.defjvp(lgamma_p, lambda g, x: mul(g, digamma(x)))  # d/dx lgamma = digamma
digamma_p = standard_unop(_float, 'digamma')
igamma_p = standard_naryop(
    [_float, _float], 'igamma',
    translation_rule=_broadcast_translate(partial(standard_translate, 'igamma')))
igamma_grad_a_p = standard_naryop([_float, _float], 'igamma_grad_a',
    translation_rule=_broadcast_translate(partial(standard_translate,
                                                  'igamma_grad_a')))
def igamma_gradx(g, a, x):
  """JVP of the regularized lower incomplete gamma with respect to x."""
  return _brcast(g, a, x) * exp(-x + (a - _ones(a)) * log(x) - lgamma(a))
def igamma_grada(g, a, x):
  """JVP of igamma with respect to a, via the igamma_grad_a primitive."""
  return _brcast(g, a, x) * igamma_grad_a(a, x)
ad.defjvp(igamma_p, igamma_grada, igamma_gradx)
igammac_p = standard_naryop(
    [_float, _float], 'igammac',
    translation_rule=_broadcast_translate(partial(standard_translate, 'igammac')))
def igammac_gradx(g, a, x):
  """JVP of igammac w.r.t. x: igammac = 1 - igamma, so negate the igamma rule."""
  return -igamma_gradx(g, a, x)
def igammac_grada(g, a, x):
  """JVP of igammac w.r.t. a: negation of the igamma rule."""
  return -igamma_grada(g, a, x)
ad.defjvp(igammac_p, igammac_grada, igammac_gradx)
bessel_i0e_p = standard_unop(_float, 'bessel_i0e')
ad.defjvp2(bessel_i0e_p, lambda g, y, x: g * (bessel_i1e(x) - sign(x) * y))
bessel_i1e_p = standard_unop(_float, 'bessel_i1e')
def _bessel_i1e_jvp(g, y, x):
  """JVP for bessel_i1e, guarding the 1/x term near x == 0.

  For |x| <= eps the derivative is replaced by 0.5, avoiding division by
  (almost) zero; `safe_x` keeps the masked-out lane finite during the compute.
  """
  eps = dtypes.finfo(_dtype(x)).eps
  x_is_not_tiny = abs(x) > eps
  safe_x = select(x_is_not_tiny, x, full_like(x, eps))
  dy_dx = bessel_i0e(safe_x) - y * (sign(safe_x) + reciprocal(safe_x))
  dy_dx = select(x_is_not_tiny, dy_dx, full_like(x, 0.5))
  return g * dy_dx
ad.defjvp2(bessel_i1e_p, _bessel_i1e_jvp)
erf_p = standard_unop(_float, 'erf')
ad.defjvp(erf_p, lambda g, x: mul(_const(x, 2. / onp.sqrt(onp.pi)),
                                  mul(g, exp(neg(square(x))))))
erfc_p = standard_unop(_float, 'erfc')
ad.defjvp(erfc_p, lambda g, x: mul(_const(x, 2. / onp.sqrt(onp.pi)),
                                   mul(neg(g), exp(neg(square(x))))))
erf_inv_p = standard_unop(_float, 'erf_inv')
ad.defjvp2(erf_inv_p, lambda g, ans, x: mul(_const(x, onp.sqrt(onp.pi) / 2.),
                                            mul(g, exp(square(ans)))))
# Complex <-> real structural primitives.
real_p = unop(_complex_basetype, _complex, 'real')
ad.deflinear(real_p, lambda t: [complex(t, onp.zeros((), _dtype(t)))])
imag_p = unop(_complex_basetype, _complex, 'imag')
ad.defjvp(imag_p, lambda g, _: real(mul(_const(g, -1j), g)))
# Result dtype when promoting real elements into a complex value.
_complex_dtype = lambda dtype, *args: (onp.zeros((), dtype) + onp.zeros((), onp.complex64)).dtype
complex_p = naryop(_complex_dtype, [_complex_elem_types, _complex_elem_types],
                  'complex')
ad.deflinear(complex_p, lambda t: [real(t), imag(neg(t))])
conj_p = unop(_complex_dtype, _complex_elem_types | _complex, 'conj')
def _conj_transpose_rule(t, x, *, input_dtype):
  """Transpose of conj; a real-typed input only receives the real part back."""
  assert ad.is_undefined_primal(x)
  if dtypes.issubdtype(input_dtype, onp.complexfloating):
    return [conj(t)]
  else:
    return [real(t)]
xla.translations[conj_p] = lambda c, x, **kwargs: xops.Conj(x)
ad.primitive_jvps[conj_p] = partial(ad.linear_jvp, conj_p)
ad.primitive_transposes[conj_p] = _conj_transpose_rule
abs_p = unop(_complex_basetype, _num, 'abs')
def _abs_jvp_rule(g, ans, x):
  """JVP for abs: sign-select for reals; Re(g * conj(x)/|x|) for complex.

  _replace_zero guards the |x| == 0 lane against division by zero.
  """
  if _iscomplex(x):
    return _maybe_real(mul(g, div(_maybe_conj(x),
               _replace_zero(convert_element_type(ans, _dtype(x))))))
  else:
    return select(ge(x, _zero(x)), g, neg(g))
ad.defjvp2(abs_p, _abs_jvp_rule)
# Apply conj/real only when the argument is complex-typed.
_maybe_conj = lambda x: conj(x) if _iscomplex(x) else x
_maybe_real = lambda x: real(x) if _iscomplex(x) else x
sqrt_p = standard_unop(_float | _complex, 'sqrt')
ad.defjvp2(sqrt_p, lambda g, ans, x: mul(g, div(_const(x, 0.5), ans)))
rsqrt_p = standard_unop(_float | _complex, 'rsqrt')
ad.defjvp2(rsqrt_p,
           lambda g, ans, x:
           mul(g, mul(_const(x, -0.5), pow(x, _const(x, -1.5)))))
pow_p = standard_naryop([_float | _complex, _float | _complex], 'pow')
def _pow_jvp_lhs(g, ans, x, y):
  """JVP of x**y in x: y * x**(y-1); the y == 0 case substitutes exponent 1
  so the derivative factor stays finite (it is then multiplied by y == 0)."""
  jac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))
  return mul(_brcast(g, y), jac)
def _pow_jvp_rhs(g, ans, x, y):
  """JVP of x**y in y: log(x) * x**y; x == 0 is replaced to avoid log(0)."""
  return mul(_brcast(g, x), mul(log(_replace_zero(x)), ans))
ad.defjvp2(pow_p, _pow_jvp_lhs, _pow_jvp_rhs)
def _integer_pow_dtype_rule(x, *, y):
  """Dtype rule for integer_pow; rejects negative powers of integer inputs."""
  dtype = unop_dtype_rule(_identity, _int | _float | _complex, 'integer_pow', x)
  if y < 0 and dtypes.issubdtype(dtype, onp.integer):
    raise TypeError("Integers cannot be raised to negative powers, got "
                    f"integer_pow({x}, {y})")
  return dtype
def _integer_pow_translation_rule(c, x, *, y):
  """Emit x**y by exponentiation-by-squaring; negative y uses a reciprocal."""
  if y == 0:
    # x**0 == 1 regardless of x, as a constant of x's dtype.
    shape = c.get_shape(x)
    return xb.constant(c, onp.array(1, dtype=shape.numpy_dtype()))
  is_reciprocal = y < 0
  if is_reciprocal:
    y = -y
  acc = None
  while y > 0:
    if y & 1:
      # Fold the current power of x into the accumulator for each set bit.
      acc = x if acc is None else xops.Mul(acc, x)
    y >>= 1
    if y > 0:
      x = xops.Mul(x, x)
  return xops.Reciprocal(acc) if is_reciprocal else acc
def _integer_pow_jvp(g, x, *, y):
  """JVP of integer_pow: y * x**(y-1) * g, and exactly g's zero when y == 0."""
  return g if y == 0 else mul(g, mul(_const(x, y), integer_pow(x, y - 1)))
integer_pow_p = standard_primitive(
    _attrgetter('shape'), _integer_pow_dtype_rule, 'integer_pow',
    translation_rule=_integer_pow_translation_rule)
batching.defvectorized(integer_pow_p)
masking.defvectorized(integer_pow_p)
ad.defjvp(integer_pow_p, _integer_pow_jvp)
# Swap exact zeros for ones — used to keep derivative rules free of 0-division.
_replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x)
# Bitwise/logical primitives; all are non-differentiable (zero JVPs).
not_p = standard_unop(_bool_or_int, 'not')
and_p = standard_naryop([_bool_or_int, _bool_or_int], 'and')
ad.defjvp_zero(and_p)
or_p = standard_naryop([_bool_or_int, _bool_or_int], 'or')
ad.defjvp_zero(or_p)
xor_p = standard_naryop([_bool_or_int, _bool_or_int], 'xor')
ad.defjvp_zero(xor_p)
population_count_p = standard_unop(_bool_or_int, 'population_count')
def _add_transpose(t, x, y):
  """Transpose of addition: the cotangent flows to both operands unchanged."""
  # assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
  return [t, t]
add_p = standard_naryop([_num, _num], 'add')
ad.defjvp(add_p, lambda g, x, y: _brcast(g, y), lambda g, x, y: _brcast(g, x))
ad.primitive_transposes[add_p] = _add_transpose
def _sub_transpose(t, x, y):
  """Transpose of subtraction: t flows to the lhs, -t to the rhs."""
  # The following linearity assertion is morally true, but because in some cases
  # we instantiate zeros for convenience, it doesn't always hold.
  return [t, neg(t) if t is not ad_util.zero else ad_util.zero]
sub_p = standard_naryop([_num, _num], 'sub')
ad.defjvp(sub_p,
          lambda g, x, y: _brcast(g, y),
          lambda g, x, y: _brcast(neg(g), x))
ad.primitive_transposes[sub_p] = _sub_transpose
mul_p = standard_naryop([_num, _num], 'mul')
ad.defbilinear_broadcasting(_brcast, mul_p, mul, mul)  # bilinear in x and y
def _div_transpose_rule(cotangent, x, y):
  """Transpose of division: linear only in the numerator x."""
  assert ad.is_undefined_primal(x) and not ad.is_undefined_primal(y)
  res = ad_util.zero if cotangent is ad_util.zero else div(cotangent, y)
  return res, None
div_p = standard_naryop([_num, _num], 'div')
ad.defjvp(div_p,
          lambda g, x, y: div(_brcast(g, y), y),
          lambda g, x, y: mul(mul(neg(_brcast(g, x)), x), integer_pow(y, -2)))
ad.primitive_transposes[div_p] = _div_transpose_rule
rem_p = standard_naryop([_num, _num], 'rem')
ad.defjvp(rem_p,
          lambda g, x, y: _brcast(g, y),
          lambda g, x, y: mul(_brcast(neg(g), x), floor(div(x, y))))
def _broadcasting_select(c, which, x, y):
  """Emit xops.Select with all three operands broadcast to a common shape."""
  which_shape, x_shape, y_shape = (
    c.get_shape(t).dimensions() for t in (which, x, y))
  out_shape = broadcast_shapes(which_shape, x_shape, y_shape)
  # Align each operand's trailing dimensions with the output shape.
  bcast_dims = lambda shape: tuple(range(len(out_shape) - len(shape),
                                         len(out_shape)))
  which = xops.BroadcastInDim(which, out_shape, bcast_dims(which_shape))
  x = xops.BroadcastInDim(x, out_shape, bcast_dims(x_shape))
  y = xops.BroadcastInDim(y, out_shape, bcast_dims(y_shape))
  return xops.Select(which, x, y)
def _minmax_translation_rule(c, x, y, *, minmax=None, cmp=None):
  """Translation for min/max; complex values compare lexicographically.

  For complex dtypes the comparison is on the real parts, with the imaginary
  parts breaking ties; real dtypes use the native xops Min/Max.
  """
  dtype = c.get_shape(x).numpy_dtype()
  if dtypes.issubdtype(dtype, onp.complexfloating):
    rx = xops.Real(x)
    ry = xops.Real(y)
    return _broadcasting_select(
        c, xops.Select(xops.Eq(rx, ry), cmp(xops.Imag(x), xops.Imag(y)),
                       cmp(rx, ry)),
        x, y)
  return minmax(x, y)
max_p = standard_naryop([_any, _any], 'max', translation_rule=partial(
    _minmax_translation_rule, minmax=xops.Max, cmp=xops.Gt))
# _balanced_eq splits the gradient between operands at exact ties.
ad.defjvp2(max_p,
           lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
           lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
min_p = standard_naryop([_any, _any], 'min', translation_rule=partial(
    _minmax_translation_rule, minmax=xops.Min, cmp=xops.Lt))
ad.defjvp2(min_p,
           lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
           lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
# Shifts and comparisons are piecewise constant: all have zero JVPs.
shift_left_p = standard_naryop([_int, _int], 'shift_left')
ad.defjvp_zero(shift_left_p)
shift_right_arithmetic_p = standard_naryop([_int, _int], 'shift_right_arithmetic')
ad.defjvp_zero(shift_right_arithmetic_p)
shift_right_logical_p = standard_naryop([_int, _int], 'shift_right_logical')
ad.defjvp_zero(shift_right_logical_p)
eq_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'eq')
ad.defjvp_zero(eq_p)
ne_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'ne')
ad.defjvp_zero(ne_p)
ge_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'ge')
ad.defjvp_zero(ge_p)
gt_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'gt')
ad.defjvp_zero(gt_p)
le_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'le')
ad.defjvp_zero(le_p)
lt_p = naryop(_fixed_dtype(onp.bool_), [_any, _any], 'lt')
ad.defjvp_zero(lt_p)
def _convert_element_type_shape_rule(operand, *, new_dtype, old_dtype):
  """convert_element_type preserves the operand's shape."""
  return operand.shape
def _convert_element_type_dtype_rule(operand, *, new_dtype, old_dtype):
  """The output dtype is exactly the requested new_dtype."""
  return new_dtype
def _convert_element_type_translation_rule(c, operand, *, new_dtype, old_dtype):
  """Translation: complex -> real casts drop the imaginary part first."""
  if (dtypes.issubdtype(old_dtype, onp.complexfloating) and
      not dtypes.issubdtype(new_dtype, onp.complexfloating)):
    operand = xops.Real(operand)
  new_etype = xla_client.dtype_to_etype(new_dtype)
  return xops.ConvertElementType(operand, new_element_type=new_etype)
def _convert_element_type_transpose_rule(t, *, new_dtype, old_dtype):
  """Transpose of a (linear) dtype cast: cast the cotangent back."""
  assert t.dtype == new_dtype, (t.dtype, new_dtype)
  return [convert_element_type_p.bind(t, new_dtype=old_dtype,
                                      old_dtype=new_dtype)]
convert_element_type_p = standard_primitive(
    _convert_element_type_shape_rule, _convert_element_type_dtype_rule,
    'convert_element_type', _convert_element_type_translation_rule)
ad.deflinear(convert_element_type_p, _convert_element_type_transpose_rule)
batching.defvectorized(convert_element_type_p)
masking.defvectorized(convert_element_type_p)
def _bitcast_convert_type_shape_rule(operand, *, new_dtype):
  """bitcast_convert_type preserves the operand's shape."""
  return operand.shape
def _bitcast_convert_type_dtype_rule(operand, *, new_dtype):
  """The output dtype is the requested new_dtype (bits reinterpreted)."""
  return new_dtype
def _bitcast_convert_type_translation_rule(c, operand, *, new_dtype):
  # NOTE(review): uses `xla_bridge` here while sibling rules use the `xb`
  # alias — confirm both names are bound at module level.
  new_etype = xla_bridge.dtype_to_etype(new_dtype)
  return xops.BitcastConvertType(operand, new_element_type=new_etype)
bitcast_convert_type_p = standard_primitive(
    _bitcast_convert_type_shape_rule, _bitcast_convert_type_dtype_rule,
    'bitcast_convert_type', _bitcast_convert_type_translation_rule)
ad.defjvp_zero(bitcast_convert_type_p)  # bit reinterpretation: no derivative
batching.defvectorized(bitcast_convert_type_p)
masking.defvectorized(bitcast_convert_type_p)
def _conv_general_dilated_shape_rule(
    lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, feature_group_count, batch_group_count,
    **unused_kwargs):
  """Shape rule for conv_general_dilated.

  Validates the feature/batch group structure of `lhs` and `rhs` against
  `dimension_numbers`, then computes the output shape by permuting into
  canonical order, dilating both operands, and applying the convolution
  shape formula.
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  if not feature_group_count > 0:
    msg = ("conv_general_dilated feature_group_count "
           "must be a positive integer, got {}.")
    raise ValueError(msg.format(feature_group_count))
  lhs_feature_count = lhs.shape[dimension_numbers.lhs_spec[1]]
  quot, rem = divmod(lhs_feature_count, feature_group_count)
  if rem:
    msg = ("conv_general_dilated feature_group_count must divide lhs feature "
           "dimension size, but {} does not divide {}.")
    raise ValueError(msg.format(feature_group_count, lhs_feature_count))
  if quot != rhs.shape[dimension_numbers.rhs_spec[1]]:
    msg = ("conv_general_dilated lhs feature dimension size divided by "
           "feature_group_count must equal the rhs input feature dimension "
           "size, but {} // {} != {}.")
    raise ValueError(msg.format(lhs_feature_count, feature_group_count,
                                rhs.shape[dimension_numbers.rhs_spec[1]]))
  if rhs.shape[dimension_numbers.rhs_spec[0]] % feature_group_count:
    msg = ("conv_general_dilated rhs output feature dimension size must be a "
           "multiple of feature_group_count, but {} is not a multiple of {}.")
    raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
                                feature_group_count))
  if not batch_group_count > 0:
    msg = ("conv_general_dilated batch_group_count "
           "must be a positive integer, got {}.")
    raise ValueError(msg.format(batch_group_count))
  lhs_batch_count = lhs.shape[dimension_numbers.lhs_spec[0]]
  if lhs_batch_count % batch_group_count != 0:
    msg = ("conv_general_dilated batch_group_count must divide lhs batch "
           "dimension size, but {} does not divide {}.")
    raise ValueError(msg.format(batch_group_count, lhs_batch_count))
  # BUG FIX: this check previously tested divisibility by feature_group_count
  # (duplicating the earlier check) and its error path referenced the
  # undefined name `batch_ground_count`, raising NameError instead of the
  # intended ValueError. The message clearly means batch_group_count.
  if rhs.shape[dimension_numbers.rhs_spec[0]] % batch_group_count:
    msg = ("conv_general_dilated rhs output feature dimension size must be a "
           "multiple of batch_group_count, but {} is not a multiple of {}.")
    raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
                                batch_group_count))
  # BUG FIX: the original condition `not batch_group_count > 0 and
  # feature_group_count > 0` could never fire (batch_group_count > 0 was
  # validated just above). Per the error message, the intent is to reject
  # both group counts being > 1 at the same time.
  if batch_group_count > 1 and feature_group_count > 1:
    msg = ("At most one of batch_group_count and feature_group_count may be > "
           "1, got batch_group_count={} and feature_group_count={}")
    raise ValueError(msg.format(batch_group_count, feature_group_count))
  # Permute to canonical order, apply dilation, then the conv shape formula,
  # and finally invert the output permutation.
  lhs_perm, rhs_perm, out_perm = dimension_numbers
  lhs_trans = _dilate_shape(onp.take(lhs.shape, lhs_perm), lhs_dilation)
  rhs_trans = _dilate_shape(onp.take(rhs.shape, rhs_perm), rhs_dilation)
  out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding,
                               batch_group_count)
  return tuple(onp.take(out_trans, onp.argsort(out_perm)))
def _conv_general_dilated_dtype_rule(
    lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, **unused_kwargs):
  """Dtype rule: both operands must be floating; result follows the lhs."""
  return naryop_dtype_rule(_input_dtype, [_float, _float],
                           'conv_general_dilated', lhs, rhs)
# Spec helpers: swap the first two entries of a conv dimension spec
# (batch/feature or output/input feature), and take the trailing spatial dims.
_conv_spec_transpose = lambda spec: (spec[1], spec[0]) + spec[2:]
_conv_sdims = lambda spec: spec[2:]
def _conv_general_dilated_transpose_lhs(
    g, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, feature_group_count, batch_group_count,
    lhs_shape, rhs_shape, precision):
  """Transpose rule for conv_general_dilated with respect to the lhs.

  The VJP is expressed as another conv_general_dilated: the kernel is
  reversed along its spatial dims, the roles of window_strides and
  lhs_dilation are swapped, and the padding is recomputed so the output
  matches lhs_shape.
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  assert batch_group_count == 1 or feature_group_count == 1
  lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  t_rhs_spec = _conv_spec_transpose(rhs_spec)
  if feature_group_count > 1:
    # Fold the feature groups out of the rhs output-feature axis and into
    # its input-feature axis, matching the transposed spec.
    rhs = _reshape_axis_out_of(rhs_spec[0], feature_group_count, rhs)
    rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
  elif batch_group_count > 1:
    rhs = _reshape_axis_out_of(rhs_spec[0], batch_group_count, rhs)
    rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
    feature_group_count = batch_group_count
  trans_dimension_numbers = ConvDimensionNumbers(out_spec, t_rhs_spec, lhs_spec)
  padding = _conv_general_vjp_lhs_padding(
      onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims),
      window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,
      rhs_dilation)
  revd_weights = rev(rhs, rhs_sdims)
  out = conv_general_dilated(
      g, revd_weights, window_strides=lhs_dilation, padding=padding,
      lhs_dilation=window_strides, rhs_dilation=rhs_dilation,
      dimension_numbers=trans_dimension_numbers,
      feature_group_count=feature_group_count,
      batch_group_count=1, precision=precision)
  if batch_group_count > 1:
    # Undo the batch-group folding on the output.
    out = _reshape_axis_out_of(lhs_spec[1], batch_group_count, out)
    out = _reshape_axis_into(lhs_spec[1], lhs_spec[0], out)
  return out
def _conv_general_dilated_transpose_rhs(
    g, lhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers: ConvDimensionNumbers, feature_group_count: int,
    batch_group_count: int, lhs_shape, rhs_shape, precision):
  """Transpose rule for conv_general_dilated with respect to the rhs.

  The VJP is again a conv_general_dilated, convolving lhs with the cotangent
  g: window_strides and rhs_dilation swap roles, and the feature/batch group
  counts are exchanged.
  """
  assert type(dimension_numbers) is ConvDimensionNumbers
  if onp.size(g) == 0:
    # Avoids forming degenerate convolutions where the RHS has spatial size 0.
    return ad_util.zero
  lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
  lhs_trans, rhs_trans, out_trans = map(_conv_spec_transpose, dimension_numbers)
  assert batch_group_count == 1 or feature_group_count == 1
  if batch_group_count > 1:
    feature_group_count = batch_group_count
    batch_group_count = 1
  elif feature_group_count > 1:
    batch_group_count = feature_group_count
    feature_group_count = 1
  trans_dimension_numbers = ConvDimensionNumbers(lhs_trans, out_trans, rhs_trans)
  padding = _conv_general_vjp_rhs_padding(
      onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims),
      window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation,
      rhs_dilation)
  return conv_general_dilated(
      lhs, g, window_strides=rhs_dilation, padding=padding,
      lhs_dilation=lhs_dilation, rhs_dilation=window_strides,
      dimension_numbers=trans_dimension_numbers,
      feature_group_count=feature_group_count,
      batch_group_count=batch_group_count, precision=precision)
def _conv_general_dilated_translation_rule(
    c, lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
    dimension_numbers, feature_group_count, batch_group_count, precision,
    **unused_kwargs):
  """Translate to XLA's ConvGeneralDilated, serializing the dimension spec."""
  assert type(dimension_numbers) is ConvDimensionNumbers
  dimension_numbers = _conv_general_proto(dimension_numbers)
  return xops.ConvGeneralDilated(lhs, rhs, window_strides, padding, lhs_dilation,
                                 rhs_dilation, dimension_numbers,
                                 feature_group_count, batch_group_count,
                                 precision_config=_precision_config(precision))
def _conv_general_dilated_batch_rule(
    batched_args, batch_dims, *, window_strides, padding,
    lhs_dilation, rhs_dilation, dimension_numbers,
    feature_group_count, batch_group_count, precision, **unused_kwargs):
  """vmap batching rule for conv_general_dilated.

  Folds the mapped axis into an existing lhs/rhs dimension (which one depends
  on which operands are batched and on the group counts), runs the unbatched
  conv, then splits the mapped axis back out of the output.
  """
  assert batch_group_count == 1 or feature_group_count == 1
  lhs, rhs = batched_args
  lhs_bdim, rhs_bdim = batch_dims
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  if lhs_bdim is not None and rhs_bdim is not None:
    # Both operands batched: fold the mapped axis into a group dimension.
    assert lhs.shape[lhs_bdim] == rhs.shape[rhs_bdim]
    if batch_group_count > 1:
      new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
      batch_group_count *= lhs.shape[lhs_bdim]
    else:
      new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[1], lhs)
      feature_group_count *= lhs.shape[lhs_bdim]
    new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
    out = conv_general_dilated(
      new_lhs, new_rhs, window_strides, padding, lhs_dilation, rhs_dilation,
      dimension_numbers, feature_group_count=feature_group_count,
      batch_group_count=batch_group_count,
      precision=precision)
    out = _reshape_axis_out_of(out_spec[1], lhs.shape[lhs_bdim], out)
    return out, out_spec[1]
  elif lhs_bdim is not None:
    # Only lhs batched: fold the mapped axis into the lhs batch dimension.
    if batch_group_count == 1:
      new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
      out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, precision=precision)
      out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
      return out, out_spec[0]
    else:
      # Keep batch groups outermost while threading the mapped axis through.
      new_lhs = _reshape_axis_out_of(lhs_spec[0] + int(lhs_bdim <= lhs_spec[0]),
                                     batch_group_count, lhs)
      new_lhs = _reshape_axis_into(lhs_bdim + int(lhs_spec[0] < lhs_bdim),
                                   lhs_spec[0] + 1,
                                   new_lhs)
      new_lhs = _reshape_axis_into(lhs_spec[0], lhs_spec[0], new_lhs)
      out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, batch_group_count,
                                 precision=precision)
      out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
      return out, out_spec[0]
  elif rhs_bdim is not None:
    # Only rhs batched: fold the mapped axis into the rhs output-feature dim.
    if feature_group_count == 1 and batch_group_count == 1:
      new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
      out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, batch_group_count,
                                 precision=precision)
      out = _reshape_axis_out_of(out_spec[1], rhs.shape[rhs_bdim], out)
      return out, out_spec[1]
    else:
      # groups need to be outermost, so we need to factor them out of the
      # rhs output feature dim, then factor the batch dim into the remaining rhs
      # output feature dim, then put groups back in. We do something
      # similar on the output. An alternative which would require more FLOPs but
      # fewer reshapes would be to broadcast lhs.
      group_count = (feature_group_count if feature_group_count > 1
                     else batch_group_count)
      new_rhs = _reshape_axis_out_of(rhs_spec[0] + int(rhs_bdim <= rhs_spec[0]),
                                     group_count, rhs)
      new_rhs = _reshape_axis_into(rhs_bdim + int(rhs_spec[0] < rhs_bdim),
                                   rhs_spec[0] + 1,
                                   new_rhs)
      new_rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[0], new_rhs)
      out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
                                 lhs_dilation, rhs_dilation, dimension_numbers,
                                 feature_group_count, batch_group_count,
                                 precision=precision)
      out = _reshape_axis_out_of(out_spec[1], group_count, out)
      out = _reshape_axis_out_of(out_spec[1] + 1, rhs.shape[rhs_bdim], out)
      out = _reshape_axis_into(out_spec[1], out_spec[1] + 1, out)
      return out, out_spec[1]
# Register the conv primitive with its shape/dtype rules, XLA translation,
# bilinear transpose rules, and vmap batching rule.
conv_general_dilated_p = standard_primitive(
    _conv_general_dilated_shape_rule, _conv_general_dilated_dtype_rule,
    'conv_general_dilated', _conv_general_dilated_translation_rule)
ad.defbilinear(conv_general_dilated_p,
               _conv_general_dilated_transpose_lhs,
               _conv_general_dilated_transpose_rhs)
batching.primitive_batchers[conv_general_dilated_p] = \
    _conv_general_dilated_batch_rule
def _reshape_axis_into(src, dst, x):
  """Merge axis `src` of `x` into axis `dst`, multiplying their sizes.

  The src axis is moved next to dst (by a transpose implied in the reshape's
  permutation) and then fused into it.
  """
  order = [axis for axis in range(x.ndim) if axis != src]
  order.insert(dst, src)
  merged_shape = [size for axis, size in enumerate(x.shape) if axis != src]
  merged_shape[dst] *= x.shape[src]
  return reshape(x, merged_shape, order)
def _reshape_axis_out_of(src, size1, x):
  """Split axis `src` of `x` into two axes of sizes (size1, shape[src]//size1).

  The original axis size must be an exact multiple of `size1`.
  """
  size2, remainder = divmod(x.shape[src], size1)
  assert not remainder
  split_shape = list(x.shape)
  split_shape[src:src + 1] = [size1, size2]
  return reshape(x, split_shape)
def _precision_config(precision):
  """Build an xla_client.PrecisionConfig applying `precision` to both operands.

  Returns None when no precision is requested, keeping XLA's default.
  """
  if precision is not None:
    config = xla_client.PrecisionConfig()
    config.operand_precision.extend((precision, precision))
    return config
  return None
def _dot_general_shape_rule(lhs, rhs, *, dimension_numbers, precision):
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
if len(lhs_batch) != len(rhs_batch):
msg = ("dot_general requires equal numbers of lhs_batch and rhs_batch "
"dimensions, got lhs_batch {} and rhs_batch {}.")
raise TypeError(msg.format(lhs_batch, rhs_batch))
if not onp.all(onp.equal(lhs_batch, rhs_batch)):
msg = ("dot_general requires same lhs and rhs batch dimension numbers, "
"got {} and {}.")
raise TypeError(msg.format(lhs_batch, rhs_batch))
lhs_batch_shape = onp.take(lhs.shape, lhs_batch)
rhs_batch_shape = onp.take(rhs.shape, rhs_batch)
if not onp.all(onp.equal(lhs_batch_shape, rhs_batch_shape)):
msg = ("dot_general requires lhs batch dimensions and rhs batch dimensions "
"to have the same shape, got {} and {}.")
raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape))
if tuple(sorted(lhs_batch)) != tuple(range(len(lhs_batch))):
msg = ("dot_general requires lhs batch dimensions to precede contracting "
"and non-contracting dimensions, got lhs_batch {}.")
raise TypeError(msg.format(lhs_batch))
if tuple(sorted(rhs_batch)) != tuple(range(len(rhs_batch))):
msg = ("dot_general requires rhs batch dimensions to precede contracting "
"and non-contracting dimensions, got rhs_batch {}.")
raise TypeError(msg.format(rhs_batch))
lhs_contracting_shape = onp.take(lhs.shape, lhs_contracting)
rhs_contracting_shape = onp.take(rhs.shape, rhs_contracting)
if not onp.all(onp.equal(lhs_contracting_shape, rhs_contracting_shape)):
msg = ("dot_general requires contracting dimensions to have the same "
"shape, got {} and {}.")
raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
batch_shape = tuple(onp.take(lhs.shape, lhs_batch))
lhs_contract_or_batch = tuple(lhs_contracting) + tuple(lhs_batch)
lhs_tensored_shape = tuple(onp.delete(lhs.shape, lhs_contract_or_batch))
rhs_contract_or_batch = tuple(rhs_contracting) + tuple(rhs_batch)
rhs_tensored_shape = tuple(onp.delete(rhs.shape, rhs_contract_or_batch))
return batch_shape + lhs_tensored_shape + rhs_tensored_shape
def _dot_general_dtype_rule(lhs, rhs, *, dimension_numbers, precision):
  # Output dtype follows the standard n-ary-op rule: both operands must be
  # numeric ('_num') and share an input dtype, which is also the output dtype.
  return naryop_dtype_rule(_input_dtype, [_num, _num], 'dot_general', lhs, rhs)
def _dot_general_transpose_lhs(g, y, *, dimension_numbers, precision,
                               swap_ans=False):
  """Transpose of dot_general w.r.t. its lhs.

  Contracts the cotangent `g` with `y` over y's free (kept) dims, then
  transposes the result back into the lhs dimension layout. With
  swap_ans=True the answer layout is the role-swapped one (used by the rhs
  transpose rule below).
  """
  (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
  # Recover x's rank from the ranks of g and y plus the dim counts.
  x_ndim = g.ndim - y.ndim + len(x_batch) + 2 * len(x_contract)
  x_kept = remaining(range(x_ndim), x_contract, x_batch)
  y_kept = remaining(range(y.ndim), y_contract, y_batch)
  if swap_ans:
    ans_batch, ans_y, _ = ranges_like(x_batch, y_kept, x_kept)
  else:
    ans_batch, _, ans_y = ranges_like(x_batch, x_kept, y_kept)
  dims = ((ans_y, y_kept), (ans_batch, y_batch))
  # Order x's contracting dims to match the order of y's contracting dims.
  x_contract_sorted_by_y = list(onp.take(x_contract, onp.argsort(y_contract)))
  out_axes = onp.argsort(list(x_batch) + x_kept + x_contract_sorted_by_y)
  return transpose(dot_general(g, y, dims, precision=precision),
                   tuple(out_axes))
def _dot_general_transpose_rhs(g, x, *, dimension_numbers, precision):
  """Transpose of dot_general w.r.t. its rhs, expressed via the lhs rule
  with the two operand roles flipped."""
  (x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
  flipped = ((y_contract, x_contract), (y_batch, x_batch))
  return _dot_general_transpose_lhs(g, x, dimension_numbers=flipped,
                                    precision=precision, swap_ans=True)
def _dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,
                            precision):
  """Batching for dot_general.

  If both operands are batched, the mapped axis becomes a new shared batch
  dimension of the contraction; if only one is batched, its mapped axis
  becomes an extra tensor-product (free) dimension of that operand.
  """
  # there are three kinds of dimensions in a dot_general:
  # - contraction dimensions appear in lhs and rhs but not the result
  # - batch dimensions appear in lhs, rhs, and result
  # - tensor product dimensions appear in the result and one of lhs or rhs
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  lhs, rhs = batched_args
  lbd, rbd = batch_dims
  assert lbd is not None or rbd is not None
  if lbd is not None and rbd is not None:
    # adding a batch dimension
    if lbd != 0:
      lhs = batching.moveaxis(lhs, lbd, 0)
    if rbd != 0:
      rhs = batching.moveaxis(rhs, rbd, 0)
    # Shift all existing dim numbers right to make room for the new leading
    # batch dim at index 0.
    lhs_batch = (0,) + tuple(onp.add(1, lhs_batch))
    rhs_batch = (0,) + tuple(onp.add(1, rhs_batch))
    lhs_contract = tuple(onp.add(1, lhs_contract))
    rhs_contract = tuple(onp.add(1, rhs_contract))
    result_batch_dim = 0
  else:
    # adding a tensor product dimension
    if lbd is not None:
      if lhs_batch == () or lbd > onp.max(lhs_batch):
        # can avoid transposes
        bump_lhs_contract = onp.greater_equal(lhs_contract, lbd)
        lhs_contract = tuple(onp.add(lhs_contract, bump_lhs_contract))
        result_batch_dim = lbd - len(lhs_contract) + sum(bump_lhs_contract)
      else:
        # move the new dimension to the end of lhs to avoid changing batch dims
        lhs = batching.moveaxis(lhs, lbd, lhs.ndim - 1)
        # lhs tensor product dims in result come after batch dims
        result_batch_dim = lhs.ndim - len(lhs_contract) - 1
    else:
      if rhs_batch == () or rbd > onp.max(rhs_batch):
        # can avoid transposes
        bump_rhs_contract = onp.greater_equal(rhs_contract, rbd)
        rhs_contract = tuple(onp.add(rhs_contract, bump_rhs_contract))
        result_batch_dim = (rbd + (lhs.ndim - len(lhs_contract) - len(lhs_batch))
                            - (len(rhs_contract) - sum(bump_rhs_contract)))
      else:
        # move the new dimension to the end of rhs to avoid changing batch dims
        rhs = batching.moveaxis(rhs, rbd, rhs.ndim - 1)
        # rhs tensor product dims in result come after batch dims + lhs tensor
        # product dims
        result_batch_dim = (lhs.ndim - len(lhs_contract) - len(lhs_batch) +
                            rhs.ndim - len(rhs_contract) - 1)
  new_dimension_numbers = [(lhs_contract, rhs_contract), (lhs_batch, rhs_batch)]
  batched_out = dot_general(lhs, rhs, new_dimension_numbers,
                            precision=precision)
  return batched_out, int(result_batch_dim)
def _dot_general_translation_rule(c, lhs, rhs, *, dimension_numbers, precision):
  # Lower directly to XLA DotGeneral; `c` is the XLA builder context.
  return xops.DotGeneral(lhs, rhs,
                         xc.make_dot_dimension_numbers(dimension_numbers),
                         precision_config=_precision_config(precision))
def _dot_general_masking_rule(padded_vals, logical_shapes, *, dimension_numbers,
                              precision):
  """Masking rule for dot_general: zero out lhs entries that lie past the
  logical extent along the contracting dims, so padding does not contribute
  to the contraction."""
  lhs, rhs = padded_vals
  lhs_shape, rhs_shape = logical_shapes
  lhs_ndim, rhs_ndim = len(lhs_shape), len(rhs_shape)
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  # we need only mask the lhs contraction dimensions
  if len(lhs_contract) == 0:
    return dot_general(lhs, rhs, dimension_numbers, precision=precision)
  valid_masks = [broadcasted_iota(onp.int32, lhs.shape, d) < lhs_shape[d]
                 for d in lhs_contract]
  combined = valid_masks[0]
  for extra in valid_masks[1:]:
    combined = combined & extra
  masked_lhs = select(combined, lhs, zeros_like_array(lhs))
  return dot_general(masked_lhs, rhs, dimension_numbers, precision=precision)
# Primitive registration for dot_general: shape/dtype/translation rules,
# bilinear autodiff, vmap batching, and the masking (padded-shape) rule.
dot_general_p = standard_primitive(_dot_general_shape_rule,
                                   _dot_general_dtype_rule, 'dot_general',
                                   _dot_general_translation_rule)
ad.defbilinear(dot_general_p,
               _dot_general_transpose_lhs, _dot_general_transpose_rhs)
batching.primitive_batchers[dot_general_p] = _dot_general_batch_rule
masking.masking_rules[dot_general_p] = _dot_general_masking_rule
def _broadcast_shape_rule(operand, sizes):
  """broadcast output shape: new leading `sizes`, then the operand's shape."""
  _check_shapelike('broadcast', 'sizes', sizes)
  return tuple(sizes) + tuple(operand.shape)
def _broadcast_batch_rule(batched_args, batch_dims, *, sizes):
  """Batching for broadcast: the new leading dims shift an existing batch
  dim right by len(sizes)."""
  (operand,), (bdim,) = batched_args, batch_dims
  if bdim is None:
    return broadcast(operand, sizes), None
  return broadcast(operand, sizes), bdim + len(sizes)
# broadcast is linear: its transpose sums over the added leading dims.
broadcast_p = standard_primitive(
    _broadcast_shape_rule, _input_dtype, 'broadcast')
ad.deflinear(broadcast_p, lambda t, sizes: [_reduce_sum(t, range(len(sizes)))])
batching.primitive_batchers[broadcast_p] = _broadcast_batch_rule
def _broadcast_in_dim_impl(operand, *, shape, broadcast_dimensions):
  """Impl with a lazy fast path: for a DeviceArray, record the broadcast in
  its lazy expression (no device data movement) instead of executing the
  primitive."""
  if type(operand) is xla.DeviceArray:
    # Validate/normalize the target shape before building the lazy expr.
    shape = _broadcast_in_dim_shape_rule(
      operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
    aval = ShapedArray(shape, _dtype(operand))
    lazy_expr = lazy.broadcast(operand._lazy_expr, shape, broadcast_dimensions)
    return xla.DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
  else:
    return xla.apply_primitive(broadcast_in_dim_p, operand, shape=shape,
                               broadcast_dimensions=broadcast_dimensions)
def _broadcast_in_dim_shape_rule(operand, *, shape, broadcast_dimensions):
  """Shape rule for broadcast_in_dim.

  `broadcast_dimensions` maps each operand dim to an output dim; entries must
  be strictly increasing, and each operand dim must either match the mapped
  target size or be 1. Returns `shape` unchanged.
  """
  _check_shapelike('broadcast_in_dim', 'shape', shape)
  _check_shapelike('broadcast_in_dim', 'broadcast_dimensions',
                   broadcast_dimensions)
  operand_ndim = onp.ndim(operand)
  if operand_ndim != len(broadcast_dimensions):
    msg = ('broadcast_in_dim broadcast_dimensions must have length equal to '
           'operand ndim; got broadcast_dimensions {} for operand ndim {}.')
    raise TypeError(msg.format(broadcast_dimensions, operand_ndim))
  if len(shape) < operand_ndim:
    msg = ('broadcast_in_dim target broadcast shape must have equal or higher rank '
           'to the operand shape; got operand ndim {} and target broadcast ndim {}.')
    raise TypeError(msg.format(operand_ndim, len(shape)))
  if not set(broadcast_dimensions).issubset(set(range(len(shape)))):
    msg = ('broadcast_in_dim broadcast_dimensions must be a subset of output '
           'dimensions, got {} for operand ndim {} and shape {}.')
    raise TypeError(msg.format(broadcast_dimensions, operand_ndim, shape))
  # Size-1 operand dims may broadcast to any target size.
  if any(operand.shape[i] != 1 and operand.shape[i] != shape[broadcast_dimensions[i]]
         for i in range(operand_ndim)):
    msg = ('broadcast_in_dim operand dimension sizes must either be 1, or be '
           'equal to their corresponding dimensions in the target broadcast shape; '
           'got operand of shape {}, target broadcast shape {}, '
           'broadcast_dimensions {} ')
    raise TypeError(msg.format(operand.shape, shape, broadcast_dimensions))
  if (len(broadcast_dimensions) != len(set(broadcast_dimensions)) or
      tuple(broadcast_dimensions) != tuple(sorted(broadcast_dimensions))):
    msg = ('broadcast_in_dim broadcast_dimensions must be strictly increasing; '
           'got broadcast_dimensions {}')
    raise TypeError(msg.format(broadcast_dimensions))
  return shape
def _broadcast_in_dim_transpose_rule(t, *, shape, broadcast_dimensions):
  """Transpose of broadcast_in_dim: sum the cotangent over every output axis
  that does not correspond to an operand axis."""
  summed_axes = tuple(axis for axis in range(len(shape))
                      if axis not in broadcast_dimensions)
  return [_reduce_sum(t, summed_axes)]
def _broadcast_in_dim_batch_rule(batched_args, batch_dims, *, shape,
                                 broadcast_dimensions):
  """Batching for broadcast_in_dim: move the batch dim to the front, prepend
  its size to the target shape, and shift all broadcast dims by one."""
  (operand,), (bdim,) = batched_args, batch_dims
  batch_size = operand.shape[bdim]
  shifted_dims = tuple(d + 1 for d in broadcast_dimensions)
  result = broadcast_in_dim(batching.moveaxis(operand, bdim, 0),
                            (batch_size,) + shape,
                            (0,) + shifted_dims)
  return result, 0
# Registration: broadcast_in_dim uses a custom impl (lazy DeviceArray path)
# and is linear, so its transpose is the reduce-sum rule above.
broadcast_in_dim_p = standard_primitive(
    _broadcast_in_dim_shape_rule, _input_dtype, 'broadcast_in_dim')
broadcast_in_dim_p.def_impl(_broadcast_in_dim_impl)
ad.deflinear(broadcast_in_dim_p, _broadcast_in_dim_transpose_rule)
batching.primitive_batchers[broadcast_in_dim_p] = _broadcast_in_dim_batch_rule
def _clamp_shape_rule(min, operand, max):
if min.shape and min.shape != operand.shape:
m = "clamp requires min.shape == operand.shape or min.shape == (), got {}."
raise TypeError(m.format(min.shape))
if max.shape and max.shape != operand.shape:
m = "clamp requires max.shape == operand.shape or max.shape == (), got {}."
raise TypeError(m.format(max.shape))
return operand.shape
# clamp dtype rule: all three args ('_any') must share a dtype.
_clamp_dtype_rule = partial(naryop_dtype_rule, _input_dtype, [_any, _any, _any],
                            'clamp')
clamp_p = standard_primitive(_clamp_shape_rule, _clamp_dtype_rule, 'clamp')
# JVPs per argument (min, operand, max):
# - w.r.t. min: tangent passes through where the min bound is active
#   (min > operand) and min is below max;
# - w.r.t. operand: tangent passes through in the unclamped interior;
# - w.r.t. max: tangent passes through where the max bound is active.
ad.defjvp(clamp_p,
          lambda g, min, operand, max:
          select(bitwise_and(gt(min, operand), lt(min, max)),
                 _brcast(g, operand), _zeros(operand)),
          lambda g, min, operand, max:
          select(bitwise_and(gt(operand, min), lt(operand, max)),
                 g, _zeros(operand)),
          lambda g, min, operand, max:
          select(lt(max, operand), _brcast(g, operand), _zeros(operand)))
def _concatenate_shape_rule(*operands, **kwargs):
  """Shape rule for concatenate: operands must share rank and agree on every
  dimension except `dimension`, whose sizes are summed."""
  dimension = kwargs.pop('dimension')
  if not operands:
    raise TypeError("concatenate expects at least one operand, got 0.")
  if not all(isinstance(operand, UnshapedArray) for operand in operands):
    bad = next(op for op in operands if not isinstance(op, UnshapedArray))
    raise TypeError(
        "All objects to concatenate must be arrays, got {}.".format(type(bad)))
  if len(set(operand.ndim for operand in operands)) != 1:
    raise TypeError(
        "Cannot concatenate arrays with different ranks, got {}.".format(
            ", ".join(str(o.ndim) for o in operands)))
  shapes = onp.array([operand.shape for operand in operands])
  if not 0 <= dimension < shapes.shape[1]:
    raise TypeError(
        "concatenate dimension out of bounds: dimension {} for shapes {}."
        .format(dimension, ", ".join(map(str, shapes))))
  if not onp.all(onp.delete(shapes[0] == shapes, dimension, axis=1)):
    raise TypeError(
        ("Cannot concatenate arrays with shapes that differ in dimensions "
         "other than the one being concatenated: dimension {} for shapes {}.")
        .format(dimension, ", ".join(map(str, shapes))))
  total = sum(o.shape[dimension] for o in operands)
  base = operands[0].shape
  return base[:dimension] + (total,) + base[dimension + 1:]
def _concatenate_dtype_rule(*operands, **kwargs):
  # All operand dtypes must agree per _check_same_dtypes; the output dtype is
  # the common one.
  _check_same_dtypes('concatenate', False, *(o.dtype for o in operands))
  return operands[0].dtype
def _concatenate_translation_rule(c, *operands, **kwargs):
  # Lower to XLA ConcatInDim; `c` is the XLA builder context.
  dimension = kwargs.pop('dimension')
  return xops.ConcatInDim(c, operands, dimension)
def _concatenate_transpose_rule(t, *operands, dimension):
  """Transpose of concatenate: split the cotangent `t` back into slices along
  `dimension`, one per undefined-primal operand (None for defined primals)."""
  operand_shapes = [o.aval.shape if ad.is_undefined_primal(o) else o.shape
                    for o in operands]
  if t is ad_util.zero:
    return [ad_util.zero if ad.is_undefined_primal(o) else None for o in operands]
  else:
    # Running offsets of each operand's extent along the concat dimension.
    limit_points = onp.cumsum([shape[dimension] for shape in operand_shapes])
    starts = onp.zeros((len(operands), t.ndim), dtype=int)
    starts[1:, dimension] = limit_points[:-1]
    limits = onp.tile(t.shape, (len(operands), 1))
    limits[:, dimension] = limit_points
    return [slice(t, start, limit) if ad.is_undefined_primal(o) else None
            for o, start, limit in zip(operands, starts, limits)]
def _concatenate_batch_rule(batched_args, batch_dims, *, dimension):
  """Batching for concatenate: bring every operand's batch dim to the front
  (broadcasting unbatched operands) and concatenate along dimension + 1."""
  size = next(op.shape[bdim]
              for op, bdim in zip(batched_args, batch_dims) if bdim is not None)
  fronted = []
  for op, bdim in zip(batched_args, batch_dims):
    if bdim is None:
      fronted.append(broadcast(op, (size,)))
    else:
      fronted.append(batching.moveaxis(op, bdim, 0))
  return concatenate(fronted, dimension + 1), 0
# The concatenate_p masking rule requires use of a while-loop construct and so
# is defined in lax_control_flow.py
concatenate_p = standard_primitive(
    _concatenate_shape_rule, _concatenate_dtype_rule, 'concatenate',
    _concatenate_translation_rule)
ad.deflinear(concatenate_p, _concatenate_transpose_rule)
# NOTE(review): the direct primitive_transposes assignment below overwrites
# whatever transpose deflinear registered — the double registration looks
# redundant; confirm which one is intended before removing either.
ad.primitive_transposes[concatenate_p] = _concatenate_transpose_rule
batching.primitive_batchers[concatenate_p] = _concatenate_batch_rule
def _pad_dtype_rule(operand, padding_value, *, padding_config):
  """pad requires the operand and its padding value to share a dtype."""
  if operand.dtype != padding_value.dtype:
    raise TypeError(
        "pad operand and padding_value must be same dtype: got {} and {}."
        .format(operand.dtype, padding_value.dtype))
  return _input_dtype(operand, padding_value)
def _pad_shape_rule(operand, padding_value, *, padding_config):
lo, hi, interior = zip(*padding_config)
out_shape = onp.add(onp.add(onp.add(lo, hi), operand.shape),
onp.multiply(interior, onp.subtract(operand.shape, 1)))
return tuple(out_shape)
def _pad_transpose(t, operand, padding_value, *, padding_config):
  """Transpose of pad: slice the operand's cotangent back out of `t`, and for
  the padding value take the total cotangent minus the operand's share."""
  if t is ad_util.zero:
    return [ad_util.zero if ad.is_undefined_primal(operand) else None,
            ad_util.zero if ad.is_undefined_primal(padding_value) else None]
  lo, hi, interior = zip(*padding_config)
  total = lambda x: _reduce_sum(x, list(range(t.ndim)))
  def t_op():
    # Undo edge padding via a negative-amount pad, then strip interior
    # padding by slicing with stride (interior + 1).
    unpad_config = zip(onp.negative(lo), onp.negative(hi), onp.zeros_like(interior))
    unpadded = pad(t, onp.array(0., t.dtype), unpad_config)
    return slice(unpadded, onp.zeros_like(lo), unpadded.shape, onp.add(interior, 1))
  t_operand = t_op() if ad.is_undefined_primal(operand) else None
  t_padv = sub(total(t), total(t_operand)) if ad.is_undefined_primal(padding_value) else None
  return [t_operand, t_padv]
def _pad_batch_rule(batched_args, batch_dims, *, padding_config):
  """Batching for pad when only the operand is batched: leave the batch dim
  unpadded by inserting a (0, 0, 0) config entry for it."""
  operand, padding_value = batched_args
  operand_bdim, padding_value_bdim = batch_dims
  if padding_value_bdim is not None:
    # A batched padding value would need padding each slice separately.
    raise NotImplementedError  # loop and stack
  assert operand_bdim is not None
  new_config = list(padding_config)
  new_config.insert(operand_bdim, (0, 0, 0))
  return pad(operand, padding_value, new_config), operand_bdim
def _pad_translation_rule(c, operand, padding_value, *, padding_config):
  # Lower to XLA Pad, converting the (lo, hi, interior) triples into the XLA
  # PaddingConfig representation.
  return xops.Pad(operand, padding_value,
                  xc.make_padding_config(padding_config))
pad_p = standard_primitive(_pad_shape_rule, _pad_dtype_rule, 'pad',
                           translation_rule=_pad_translation_rule)
ad.deflinear(pad_p, _pad_transpose)
# NOTE(review): the direct assignment below overwrites the transpose that
# deflinear registered — double registration looks redundant; confirm intent.
ad.primitive_transposes[pad_p] = _pad_transpose
batching.primitive_batchers[pad_p] = _pad_batch_rule
# We have a nonstandard reshape impl so that we can be lazy about data movement.
def _reshape_impl(operand, *, new_sizes, dimensions):
  """Impl for reshape with fast paths: a singleton-only reshape of a
  DeviceArray becomes a lazy broadcast, and some ShardedDeviceArray reshapes
  can reuse existing shards; otherwise fall back to the primitive."""
  old_sizes = onp.shape(operand)
  if type(operand) is xla.DeviceArray and dimensions is None:
    bcast_dims = _is_singleton_reshape(old_sizes, new_sizes)
    if bcast_dims is not None:
      aval = ShapedArray(new_sizes, operand.dtype)
      lazy_expr = lazy.broadcast(operand._lazy_expr, new_sizes, bcast_dims)
      return xla.DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
  if type(operand) is pxla.ShardedDeviceArray and dimensions is None:
    array = _reshape_sharded_device_array(operand, new_sizes, old_sizes)
    if array is not None:
      return array
  return xla.apply_primitive(reshape_p, operand, new_sizes=new_sizes,
                             dimensions=dimensions)
def _is_singleton_reshape(old, new):
# A singleton reshape is one where only singleton dimensions are added. We
# want to detect them because they can be expressed as (lazy) broadcasts.
old, new = iter(old), iter(new)
d1, d2 = next(old, None), next(new, None)
bcast_dims = []
i = 0
while True:
if d1 is d2 is None:
return bcast_dims
elif d1 == d2:
bcast_dims.append(i)
i += 1
d1, d2 = next(old, None), next(new, None)
elif d2 == 1:
i += 1
d2 = next(new, None)
else:
return None
def _reshape_sharded_device_array(array, new_sizes, old_sizes):
  """Try to reshape a ShardedDeviceArray without moving data, by reusing its
  existing device buffers when the leading axis is merged or split on shard
  boundaries. Returns None when that is not possible."""
  # TODO(jekbradbury): the axis split/merge logic below assumes that
  # ShardedDevicesArrays are always sharded across their leading axes. Remove
  # this constraint, especially if/when we add APIs that produce sharding across
  # interior axes.
  if any(num_shards != 1 for num_shards
         in array.sharding_spec.shards_per_axis[1:]):
    return None
  # TODO(skye): handle replicated buffers
  if array.sharding_spec.replication_factor != 1:
    return None
  # ShardedDevicesArrays require all buffers to have the same shape
  chunk_shape = array.device_buffers[0].shape().dimensions()
  chunk_size = chunk_shape[0] if len(chunk_shape) > 0 else 1
  if _is_axis_merge(old_sizes, new_sizes):
    # Leading two axes merged: buffers line up iff the new leading size is a
    # whole number of chunks.
    num_chunks, ragged = divmod(new_sizes[0], chunk_size)
    if ragged: return None
    aval = ShapedArray(new_sizes, array.dtype)
    sharding_spec = pxla.ShardingSpec(
        shards_per_axis=(num_chunks,) + (1,) * (len(new_sizes) - 1),
        is_axis_materialized=(True,) * len(new_sizes),
        replication_factor=1)
    return pxla.ShardedDeviceArray(aval, sharding_spec, array.device_buffers)
  if _is_axis_split(old_sizes, new_sizes):
    # Leading axis split in two: only reusable when the split matches the
    # per-device chunking exactly.
    split_axis_size, ragged = divmod(old_sizes[0], chunk_size)
    if ragged: return None
    if new_sizes[0] != split_axis_size: return None
    aval = ShapedArray(new_sizes, array.dtype)
    sharding_spec = pxla._pmap_sharding_spec(
        new_sizes[0], new_sizes[0], ShapedArray(new_sizes[1:], array.dtype), True)
    return pxla.ShardedDeviceArray(aval, sharding_spec, array.device_buffers)
  return None
def _is_axis_merge(s1, s2):
# TODO(skye): we might still be able to handle these cases as merges, I
# haven't thought about it much.
if len(s1) < 2 or len(s2) < 1: return False
return s1[2:] == s2[1:] and s1[0] * s1[1] == s2[0]
def _is_axis_split(s1, s2):
return _is_axis_merge(s2, s1)
def _reshape_shape_rule(operand, *, new_sizes, dimensions):
if not onp.all(onp.greater_equal(new_sizes, 0)):
msg = 'reshape new_sizes must all be positive, got {}.'
raise TypeError(msg.format(new_sizes))
if prod(onp.shape(operand)) != prod(new_sizes):
msg = 'reshape total size must be unchanged, got new_sizes {} for shape {}.'
raise TypeError(msg.format(new_sizes, onp.shape(operand)))
if dimensions is not None:
if set(dimensions) != set(range(onp.ndim(operand))):
msg = ('reshape dimensions must be a permutation of operand dimensions, '
'got dimensions {} for shape {}.')
raise TypeError(msg.format(dimensions, onp.shape(operand)))
return tuple(new_sizes)
def _reshape_dtype_rule(operand, *, new_sizes, dimensions):
  # reshape never changes the element type.
  return operand.dtype
def _reshape_translation_rule(c, operand, *, new_sizes, dimensions):
  # XLA Reshape has two overloads: with and without a dimension permutation
  # applied before reshaping.
  if dimensions is None:
    return xops.Reshape(operand, new_sizes)
  else:
    return xops.Reshape(operand, dimensions, new_sizes)
def _reshape_transpose_rule(t, operand, *, new_sizes, dimensions):
  """Transpose of reshape: reshape the cotangent back to the operand shape,
  inverting the optional dimension permutation with argsort."""
  assert ad.is_undefined_primal(operand)
  if dimensions is None:
    return [reshape(t, operand.aval.shape)]
  else:
    return [transpose(reshape(t, onp.take(operand.aval.shape, dimensions)),
                      onp.argsort(dimensions))]
def _reshape_batch_rule(batched_args, batch_dims, *, new_sizes, dimensions):
  """Batching for reshape: front the batch dim, keep it out of the reshape by
  prepending its size to new_sizes (shifting any permutation by one)."""
  (operand,), (bdim,) = batched_args, batch_dims
  fronted = batching.moveaxis(operand, bdim, 0)
  perm = None if dimensions is None else (0,) + tuple(d + 1 for d in dimensions)
  return reshape(fronted, fronted.shape[:1] + new_sizes, perm), 0
# Registration: reshape uses a custom impl (lazy/sharded fast paths) and the
# linear transpose rule above.
reshape_p = standard_primitive(_reshape_shape_rule, _reshape_dtype_rule,
                               'reshape', _reshape_translation_rule)
reshape_p.def_impl(_reshape_impl)
ad.deflinear2(reshape_p, _reshape_transpose_rule)
batching.primitive_batchers[reshape_p] = _reshape_batch_rule
def _rev_shape_rule(operand, *, dimensions):
  """rev keeps the operand shape; dimensions must be unique and in range."""
  _check_shapelike('rev', 'dimensions', dimensions)
  if len(set(dimensions)) != len(dimensions):
    raise TypeError('rev dimensions must be unique, got {}.'.format(dimensions))
  if dimensions and not _max(dimensions) < operand.ndim:
    raise TypeError(
        ('rev dimensions must all be less than operand ndim, got dimensions '
         '{} for operand ndim {}.').format(dimensions, operand.ndim))
  return operand.shape
def _rev_batch_rule(batched_args, batch_dims, *, dimensions):
  """Batching for rev: shift each reversed dim that sits at or past the
  batch dim, leaving the batch dim itself unreversed."""
  (operand,), (bdim,) = batched_args, batch_dims
  shifted = []
  for d in dimensions:
    shifted.append(d + 1 if d >= bdim else d)
  return rev(operand, shifted), bdim
# rev is linear and self-inverse: its transpose reverses the same dims.
rev_p = standard_primitive(_rev_shape_rule, _input_dtype, 'rev')
ad.deflinear(rev_p, lambda t, dimensions: [rev(t, dimensions)])
batching.primitive_batchers[rev_p] = _rev_batch_rule
def _transpose_impl(operand, *, permutation):
  """Impl for transpose with a lazy fast path: for a DeviceArray, record the
  permutation in the lazy expression instead of moving device data."""
  if type(operand) is xla.DeviceArray:
    lazy_expr = lazy.transpose(operand._lazy_expr, permutation)
    aval = ShapedArray(lazy_expr.shape, operand.dtype)
    return xla.DeviceArray(aval, operand._device, lazy_expr, operand.device_buffer)
  else:
    return xla.apply_primitive(transpose_p, operand, permutation=permutation)
def _transpose_shape_rule(operand, *, permutation):
if not isinstance(permutation, (tuple, list, onp.ndarray)):
msg = "transpose permutation must be a tuple/list/ndarray, got {}."
raise TypeError(msg.format(type(permutation)))
if tuple(sorted(permutation)) != tuple(range(operand.ndim)):
msg = ("transpose permutation isn't a permutation of operand dimensions, "
"got permutation {} for operand shape {}.")
raise TypeError(msg.format(permutation, operand.shape))
return tuple(onp.take(operand.shape, permutation))
def _transpose_batch_rule(batched_args, batch_dims, *, permutation):
  """Batching for transpose: pin the batch dim first and shift the
  permutation entries at or past it by one."""
  (operand,), (bdim,) = batched_args, batch_dims
  shifted = [axis + 1 if axis >= bdim else axis for axis in permutation]
  return transpose(operand, (bdim,) + tuple(shifted)), 0
# Registration: transpose uses the lazy custom impl; it is linear and its
# transpose applies the inverse permutation (argsort of the permutation).
transpose_p = standard_primitive(_transpose_shape_rule, _input_dtype,
                                 'transpose')
transpose_p.def_impl(_transpose_impl)
ad.deflinear(transpose_p,
             lambda t, permutation: [transpose(t, onp.argsort(permutation))])
batching.primitive_batchers[transpose_p] = _transpose_batch_rule
def _select_shape_rule(pred, on_true, on_false):
if on_true.shape != on_false.shape:
msg = "select on_true and on_false must have the same shape, got {} and {}."
raise TypeError(msg.format(on_true.shape, on_false.shape))
if pred.shape and pred.shape != on_true.shape:
msg = ("select pred must be scalar or have the same shape as on_true and "
"on_false, got pred shape {} for on_true and on_false of shape {}.")
raise TypeError(msg.format(pred.shape, on_true.shape))
return on_true.shape
def _select_dtype_rule(pred, on_true, on_false):
  # Branch dtypes must match; the predicate must be boolean.
  _check_same_dtypes("select", False, on_true.dtype, on_false.dtype)
  if not dtypes.issubdtype(pred.dtype, onp.bool_):
    msg = "select pred must be boolean type, got {}."
    raise TypeError(msg.format(pred.dtype))
  return on_true.dtype
def _select_transpose_rule(t, pred, on_true, on_false):
  """Transpose of select: route the cotangent to whichever branch each
  element selected (zeros elsewhere); `pred` itself gets no cotangent."""
  assert not ad.is_undefined_primal(pred)
  if t is ad_util.zero:
    return [None,
            ad_util.zero if ad.is_undefined_primal(on_true) else None,
            ad_util.zero if ad.is_undefined_primal(on_false) else None]
  else:
    zeros = full_like(t, 0)
    return [None,
            select(pred, t, zeros) if ad.is_undefined_primal(on_true) else None,
            select(pred, zeros, t) if ad.is_undefined_primal(on_false) else None]
def _select_batch_rule(batched_args, batch_dims, **unused_kwargs):
  """Batching for select: align the batch dims of pred/on_true/on_false,
  taking fast paths that avoid transposes and broadcasts where possible."""
  pred, on_true, on_false, = batched_args
  pred_bdim, ot_bdim, of_bdim = batch_dims
  size = next(x.shape[i] for x, i in zip(batched_args, batch_dims)
              if i is not None)
  # avoid transposes and some broadcasts in special cases
  if pred_bdim == ot_bdim == of_bdim:
    if onp.shape(pred) == onp.shape(on_true):
      return select(pred, on_true, on_false), pred_bdim
    else:
      # vmapped function had a scalar pred with nonscalar args
      assert onp.ndim(pred) == 1
      pred = broadcast_in_dim(pred, on_true.shape, [pred_bdim])
      return select(pred, on_true, on_false), pred_bdim
  elif onp.ndim(pred) == 0 and ot_bdim is not None and of_bdim is not None:
    if ot_bdim == of_bdim:
      return select(pred, on_true, on_false), ot_bdim
    elif onp.shape(on_true) == onp.shape(on_false):
      on_false = batching.moveaxis(on_false, of_bdim, ot_bdim)
      return select(pred, on_true, on_false), ot_bdim
  # General case: move every batched arg's batch dim to the front.
  pred = batching.bdim_at_front(pred, pred_bdim, size) if onp.shape(pred) else pred
  if not onp.shape(on_true) == onp.shape(on_false) == ():
    on_true = batching.bdim_at_front(on_true, ot_bdim, size)
    on_false = batching.bdim_at_front(on_false, of_bdim, size)
  assert onp.shape(on_true) == onp.shape(on_false)
  if 0 < onp.ndim(pred) < onp.ndim(on_true):
    # vmapped function had a scalar pred with nonscalar args
    assert onp.ndim(pred) == 1
    pred = broadcast_in_dim(pred, on_true.shape, [0])
  if onp.ndim(pred) > onp.ndim(on_true):
    assert onp.ndim(on_true) == 0
    on_true = broadcast(on_true, pred.shape)
    on_false = broadcast(on_false, pred.shape)
  return select(pred, on_true, on_false), 0
# select is nondifferentiable in pred (None jvp); the branch jvps pass the
# tangent through only where that branch is selected.
select_p = standard_primitive(_select_shape_rule, _select_dtype_rule, 'select')
ad.defjvp(select_p,
          None,
          lambda g, b, x, y: select(b, g, _zeros(g)),
          lambda g, b, x, y: select(b, _zeros(g), g))
ad.primitive_transposes[select_p] = _select_transpose_rule
batching.primitive_batchers[select_p] = _select_batch_rule
def _slice_shape_rule(operand, *, start_indices, limit_indices, strides):
  """Shape rule for (static) slice.

  Validates start/limit indices and strides against the operand shape and
  returns the result shape, ceil((limit - start) / stride) per dimension.
  """
  _check_shapelike("slice", "start_indices", start_indices)
  _check_shapelike("slice", "limit_indices", limit_indices)
  if operand.ndim != len(start_indices):
    msg = ("slice start_indices must have length equal to the number of "
           "dimensions of the operand, got indices {} for operand shape {}.")
    raise TypeError(msg.format(start_indices, operand.shape))
  if len(start_indices) != len(limit_indices):
    # Fixed typo in the message below ("start_inidices" -> "start_indices").
    msg = ("slice limit_indices must have the same length as start_indices, "
           "got start_indices {} and limit_indices {}.")
    raise TypeError(msg.format(start_indices, limit_indices))
  if not onp.all(onp.less_equal(limit_indices, operand.shape)):
    msg = ("slice limit_indices must be less than or equal to operand shape, "
           "got limit_indices {} for operand shape {}.")
    raise TypeError(msg.format(limit_indices, operand.shape))
  if not onp.all(onp.greater_equal(start_indices, 0)):
    msg = ("slice start_indices must be greater than or equal to zero, "
           "got start_indices of {}.")
    raise TypeError(msg.format(start_indices))
  if not onp.all(onp.greater_equal(limit_indices, start_indices)):
    msg = ("slice limit_indices must be greater than or equal to start_indices,"
           " got start_indices {} and limit_indices {}.")
    raise TypeError(msg.format(start_indices, limit_indices))
  if strides is None:
    strides = onp.ones(operand.ndim, onp.int32)
  else:
    _check_shapelike("slice", "strides", strides)
    if len(strides) != operand.ndim:
      msg = ("slice strides must have length equal to the number of dimensions "
             "of the operand, got strides {} for operand shape {}.")
      raise TypeError(msg.format(strides, operand.shape))
    if not onp.all(onp.greater(strides, 0)):
      msg = "slice strides must be positive, got {}"
      raise TypeError(msg.format(strides))
  # ceil((limit - start) / stride) computed in integer arithmetic.
  result_shape = onp.floor_divide(
      onp.add(onp.subtract(limit_indices, start_indices), strides) - 1, strides)
  return tuple(result_shape)
def _slice_translation_rule(c, operand, *, start_indices, limit_indices,
                            strides):
  # XLA Slice requires explicit strides; default to all-ones when None.
  return xops.Slice(operand, start_indices, limit_indices,
                    strides or [1] * len(start_indices))
def _slice_transpose_rule(t, operand, *, start_indices, limit_indices, strides):
  """Transpose of slice: pad the cotangent back up to the operand shape,
  using interior padding of (stride - 1) for strided slices."""
  assert ad.is_undefined_primal(operand)
  operand_shape = operand.aval.shape
  if strides is None or onp.all(onp.equal(strides, 1)):
    pads = zip(start_indices, onp.subtract(operand_shape, limit_indices),
               (0,) * len(start_indices))
  else:
    # Last element actually touched by the strided slice, per dimension.
    real_limits = onp.add(onp.add(start_indices, 1),
                          onp.multiply(onp.subtract(t.shape, 1), strides))
    pads = zip(start_indices, onp.subtract(operand_shape, real_limits),
               onp.subtract(strides, 1))
  result = pad(t, _const(t, 0), pads)
  assert result.shape == operand_shape
  return [result]
def _slice_batching_rule(batched_args, batch_dims, *, start_indices,
                         limit_indices, strides):
  """Batching for slice: take the whole batch dimension by inserting a full
  (0, size, 1) slice spec at the batch dim position."""
  (operand,), (bdim,) = batched_args, batch_dims
  starts = list(start_indices)
  starts.insert(bdim, 0)
  limits = list(limit_indices)
  limits.insert(bdim, operand.shape[bdim])
  if strides is None:
    steps = None
  else:
    steps = list(strides)
    steps.insert(bdim, 1)
  return slice(operand, starts, limits, steps), bdim
# slice is linear: its transpose pads the cotangent back to the input shape.
slice_p = standard_primitive(_slice_shape_rule, _input_dtype, 'slice',
                             _slice_translation_rule)
ad.deflinear2(slice_p, _slice_transpose_rule)
batching.primitive_batchers[slice_p] = _slice_batching_rule
def _dynamic_slice_shape_rule(operand, *start_indices, slice_sizes):
if operand.ndim != len(start_indices):
msg = ("dynamic_slice start_indices must have length equal to the number "
"of dimensions of the operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if len(start_indices) != len(slice_sizes):
msg = ("dynamic_slice slice_sizes must have the same length as "
"start_indices, got start_inidices length {} and slice_sizes {}.")
raise TypeError(msg.format(len(start_indices), slice_sizes))
if not onp.all(onp.less_equal(slice_sizes, operand.shape)):
msg = ("slice slice_sizes must be less than or equal to operand shape, "
"got slice_sizes {} for operand shape {}.")
raise TypeError(msg.format(slice_sizes, operand.shape))
if not onp.all(onp.greater_equal(slice_sizes, 0)):
msg = ("slice slice_sizes must be greater than or equal to zero, "
"got slice_sizes of {}.")
raise TypeError(msg.format(slice_sizes))
return tuple(slice_sizes)
def _dynamic_slice_dtype_rule(operand, *start_indices, slice_sizes):
  """dynamic_slice keeps the operand dtype; all start indices must share a
  single integer dtype."""
  indices_ok = all(i.dtype == start_indices[0].dtype and
                   dtypes.issubdtype(i.dtype, onp.integer)
                   for i in start_indices)
  if not indices_ok:
    raise TypeError(
        ("index arguments to dynamic_slice must be integers of the same "
         "type, got: {}").format(
             ", ".join(i.dtype.name for i in start_indices)))
  return operand.dtype
def _dynamic_slice_translation_rule(c, operand, *start_indices, slice_sizes):
  # Lower directly to XLA DynamicSlice; start indices are scalar operands.
  return xops.DynamicSlice(operand, start_indices, slice_sizes)
def _dynamic_slice_jvp(primals, tangents, *, slice_sizes):
  """JVP for dynamic_slice: slice the operand tangent at the same (primal)
  start indices; the integer index tangents contribute nothing."""
  operand, start_indices = primals[0], primals[1:]
  operand_dot = tangents[0]
  if operand_dot is ad_util.zero:
    tangent_out = ad_util.zero
  else:
    tangent_out = dynamic_slice(operand_dot, start_indices, slice_sizes)
  return dynamic_slice(operand, start_indices, slice_sizes), tangent_out
def _dynamic_slice_transpose_rule(t, operand, *start_indices, slice_sizes):
  """Transpose of dynamic_slice: write the cotangent into a zero array of the
  operand's shape at the same start indices; indices get no cotangent."""
  assert ad.is_undefined_primal(operand)
  assert all(not ad.is_undefined_primal(s) for s in start_indices)
  operand_shape = operand.aval.shape
  zeros = full(operand_shape, tie_in(t, _zero(t)))
  return ([dynamic_update_slice(zeros, t, start_indices)] +
          [None] * len(start_indices))
def _batch_dynamic_slice_indices(indices, bdims):
  """Combines per-dimension scalar start indices into one index array.

  If no index is batched, returns a rank-1 concatenation and batch dim None.
  Otherwise each index is broadcast to the common batch size and stacked along
  a trailing axis, returning the (size, n)-shaped array and batch dim 0.
  """
  size = next((x.shape[i] for x, i in zip(indices, bdims) if i is not None), -1)
  if size < 0:
    # Unbatched case: a plain concatenation into a rank-1 index vector.
    return concatenate([reshape(i, [1]) for i in indices], 0), None
  indices = concatenate(
    [broadcast_in_dim(x, (size, 1),
                      broadcast_dimensions=((0,) if i is not None else ()))
     for x, i in zip(indices, bdims)],
    dimension=1)
  return indices, 0
def _dynamic_slice_batching_rule(batched_args, batch_dims, *, slice_sizes):
  """Batching rule: express the batched dynamic_slice as a gather."""
  # A dynamic slice is a special case of gather; we can delegate to the gather
  # batching rule.
  # TODO(phawkins): consider removing dynamic_slice entirely and using gather
  # always.
  operand, *start_indices = batched_args
  operand_bd, *start_idx_bds = batch_dims
  operand_shape = (operand.shape if operand_bd is batching.not_mapped
                   else tuple(onp.delete(operand.shape, operand_bd)))
  # Gather dimension numbers that make gather behave exactly like
  # dynamic_slice: every (logical) operand dim is an offset dim.
  dims = tuple(range(len(operand_shape)))
  dnums = GatherDimensionNumbers(offset_dims=dims, collapsed_slice_dims=(),
                                 start_index_map=dims)
  index, index_bdim = _batch_dynamic_slice_indices(start_indices, start_idx_bds)
  return _gather_batching_rule(
    [operand, index], [operand_bd, index_bdim], dimension_numbers=dnums,
    slice_sizes=slice_sizes)
# Register the dynamic_slice primitive: shape/dtype rules, XLA lowering, and
# the JVP/transpose/batching rules defined above.
dynamic_slice_p = standard_primitive(
    _dynamic_slice_shape_rule, _dynamic_slice_dtype_rule, 'dynamic_slice',
    _dynamic_slice_translation_rule)
ad.primitive_jvps[dynamic_slice_p] = _dynamic_slice_jvp  # TODO
ad.primitive_transposes[dynamic_slice_p] = _dynamic_slice_transpose_rule
batching.primitive_batchers[dynamic_slice_p] = _dynamic_slice_batching_rule
def _dynamic_update_slice_shape_rule(operand, update, *start_indices):
if operand.ndim != update.ndim:
msg = ("dynamic_update_slice update must have the same rank as operand, "
"got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
if operand.ndim != len(start_indices):
msg = ("dynamic_update_slice start_indices must have length equal to the "
"rank of operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if not onp.all(onp.less_equal(update.shape, operand.shape)):
msg = ("dynamic_update_slice update shape must be smaller than operand "
"shape, got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
return operand.shape
def _dynamic_update_slice_dtype_rule(operand, update, *start_indices):
  """Dtype rule: update must match the operand dtype; indices must share one
  integer dtype. The result keeps the operand's dtype."""
  _check_same_dtypes("dynamic_update_slice", False, operand.dtype, update.dtype)
  if any(i.dtype != start_indices[0].dtype or
         not dtypes.issubdtype(i.dtype, onp.integer) for i in start_indices):
    msg = ("index arguments to dynamic_update_slice must be integers of the "
           "same type, got {}")
    raise TypeError(msg.format(", ".join(i.dtype.name for i in start_indices)))
  return operand.dtype
def _dynamic_update_slice_jvp(primals, tangents):
  """JVP rule: linear in operand and update; indices carry no tangent."""
  operand, update = primals[:2]
  start_indices = primals[2:]
  g_operand, g_update = tangents[:2]
  val_out = dynamic_update_slice(operand, update, start_indices)
  if g_operand is ad_util.zero and g_update is ad_util.zero:
    tangent_out = ad_util.zero
  else:
    g_operand = ad.instantiate_zeros(operand, g_operand)
    g_update = ad.instantiate_zeros(update, g_update)
    # Apply the same update to the tangents at the same (primal) indices.
    tangent_out = dynamic_update_slice(g_operand, g_update, start_indices)
  return val_out, tangent_out
def _dynamic_update_slice_transpose_rule(t, operand, update, *start_indices):
  """Transpose rule for dynamic_update_slice.

  Operand cotangent: the output cotangent with the updated window zeroed out.
  Update cotangent: the slice of the output cotangent at the window.
  Index arguments are non-differentiable and get None.
  """
  assert all(not ad.is_undefined_primal(x) for x in start_indices)
  if ad.is_undefined_primal(update):
    update_shape = update.aval.shape
  else:
    update_shape = update.shape
  dus = dynamic_update_slice
  ds = dynamic_slice
  zeros = _zeros(t, shape=update_shape)
  operand_t = dus(t, zeros, start_indices) if ad.is_undefined_primal(operand) else None
  update_t = ds(t, start_indices, update_shape) if ad.is_undefined_primal(update) else None
  return [operand_t, update_t] + [None] * len(start_indices)
def _dynamic_update_slice_translation_rule(c, operand, update, *start_indices):
  """XLA translation: maps 1:1 onto xops.DynamicUpdateSlice."""
  return xops.DynamicUpdateSlice(operand, update, start_indices)
def _dynamic_update_slice_batching_rule(batched_args, batch_dims):
  """Batching rule: express the batched dynamic_update_slice as a scatter."""
  # A dynamic update slice is a special case of scatter; we can delegate to the
  # scatter batching rule.
  # TODO(phawkins): consider removing dynamic_update_slice entirely and using
  # scatter always.
  operand, update, *start_idx = batched_args
  operand_bd, update_bd, *start_idx_bd = batch_dims
  update_shape = (update.shape if update_bd is batching.not_mapped
                  else tuple(onp.delete(update.shape, update_bd)))
  # Scatter dimension numbers that make scatter behave exactly like
  # dynamic_update_slice: every (logical) update dim is a window dim.
  dims = tuple(range(len(update_shape)))
  dnums = ScatterDimensionNumbers(update_window_dims=dims,
                                  inserted_window_dims=(),
                                  scatter_dims_to_operand_dims=dims)
  index, index_bdim = _batch_dynamic_slice_indices(start_idx, start_idx_bd)
  return _scatter_batching_rule(
    scatter, (operand, index, update), (operand_bd, index_bdim, update_bd),
    update_jaxpr=None, update_consts=None, dimension_numbers=dnums)
# Register the dynamic_update_slice primitive and its AD/vmap rules.
dynamic_update_slice_p = standard_primitive(
    _dynamic_update_slice_shape_rule, _dynamic_update_slice_dtype_rule,
    'dynamic_update_slice', _dynamic_update_slice_translation_rule)
ad.primitive_jvps[dynamic_update_slice_p] = _dynamic_update_slice_jvp
ad.primitive_transposes[dynamic_update_slice_p] = \
    _dynamic_update_slice_transpose_rule
batching.primitive_batchers[dynamic_update_slice_p] = \
    _dynamic_update_slice_batching_rule
def _gather_dimensions_proto(indices_shape, dimension_numbers):
  """Converts a GatherDimensionNumbers to the XLA client protocol form."""
  assert type(dimension_numbers) is GatherDimensionNumbers
  proto = xla_client.GatherDimensionNumbers()
  proto.offset_dims.extend(dimension_numbers.offset_dims)
  proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
  proto.start_index_map.extend(dimension_numbers.start_index_map)
  assert indices_shape.rank() > 0
  # By convention here, the trailing axis of the indices array holds the
  # index vector.
  proto.index_vector_dim = indices_shape.rank() - 1
  return proto
def _gather_dtype_rule(operand, start_indices, **kwargs):
  """Dtype rule for gather: indices must be integral; output keeps the
  operand's (canonicalized) dtype."""
  if not dtypes.issubdtype(start_indices.dtype, onp.integer):
    raise ValueError("start_indices must have an integer type")
  return dtypes.canonicalize_dtype(operand.dtype)
def _gather_shape_rule(operand, start_indices, *, dimension_numbers,
slice_sizes):
if len(operand.shape) != len(slice_sizes):
msg = ("slice_sizes must have rank equal to the gather operand; "
"operand.shape={}, slice_sizes={}".format(operand.shape, slice_sizes))
raise ValueError(msg)
result_rank = len(dimension_numbers.offset_dims) + start_indices.ndim - 1
start_indices_shape = iter(start_indices.shape[:-1])
slice_sizes = iter(onp.delete(slice_sizes, dimension_numbers.collapsed_slice_dims))
return tuple(next(slice_sizes) if i in dimension_numbers.offset_dims
else next(start_indices_shape) for i in range(result_rank))
def _gather_translation_rule(c, operand, start_indices, *, dimension_numbers,
                             slice_sizes):
  """XLA translation: emit xops.Gather with the proto-converted dnums."""
  indices_shape = c.get_shape(start_indices)
  return xops.Gather(
    operand, start_indices,
    _gather_dimensions_proto(indices_shape, dimension_numbers), slice_sizes,
    indices_are_sorted=False)
def _gather_jvp_rule(g, operand, start_indices, *, dimension_numbers,
                     slice_sizes):
  """JVP rule: gather is linear in the operand, so gather the tangent too."""
  return gather(g, start_indices, dimension_numbers, slice_sizes)
def _gather_transpose_rule(t, operand, start_indices, *, dimension_numbers,
                           slice_sizes):
  """Transpose rule: the transpose of a gather is a scatter-add of the
  cotangent into a zero operand at the same indices."""
  assert ad.is_undefined_primal(operand)
  operand_shape = operand.aval.shape
  if t is ad_util.zero:
    return [ad_util.zero, ad_util.zero]
  zeros = full(operand_shape, tie_in(t, _zero(t)))
  # Gather offset/collapsed dims correspond to scatter window/inserted dims.
  scatter_dnums = ScatterDimensionNumbers(
    update_window_dims=dimension_numbers.offset_dims,
    inserted_window_dims=dimension_numbers.collapsed_slice_dims,
    scatter_dims_to_operand_dims=dimension_numbers.start_index_map)
  return [scatter_add(zeros, start_indices, t, scatter_dnums), ad_util.zero]
def _gather_batching_rule(batched_args, batch_dims, *, dimension_numbers,
                          slice_sizes):
  """Batching rule for gather.

  Three cases: only the operand is batched, only the indices are batched, or
  both are. In each case the batch axis is moved to the front and the gather
  dimension numbers are shifted by one to account for it.
  """
  operand, start_indices = batched_args
  operand_bdim, start_indices_bdim = batch_dims
  if operand_bdim is not None and start_indices_bdim is None:
    # Only the operand is batched: treat the batch axis as an extra offset
    # dimension whose full extent is sliced.
    operand = batching.moveaxis(operand, operand_bdim, 0)
    slice_sizes = (operand.shape[0],) + slice_sizes
    offset_dims = (0,) + tuple(onp.add(1, dimension_numbers.offset_dims))
    collapsed_slice_dims = tuple(onp.add(1, dimension_numbers.collapsed_slice_dims))
    start_index_map = tuple(onp.add(1, dimension_numbers.start_index_map))
    dnums = GatherDimensionNumbers(
        offset_dims=offset_dims,
        collapsed_slice_dims=collapsed_slice_dims,
        start_index_map=start_index_map)
    return gather(operand, start_indices, dimension_numbers=dnums,
                  slice_sizes=slice_sizes), 0
  elif operand_bdim is None and start_indices_bdim is not None:
    # Only the indices are batched: the batch axis just becomes another
    # leading batch dimension of the index array.
    start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)
    offset_dims = tuple(onp.add(1, dimension_numbers.offset_dims))
    dnums = GatherDimensionNumbers(
        offset_dims=offset_dims,
        collapsed_slice_dims=dimension_numbers.collapsed_slice_dims,
        start_index_map=dimension_numbers.start_index_map)
    return gather(operand, start_indices, dimension_numbers=dnums,
                  slice_sizes=slice_sizes), 0
  else:
    # move our batch dimensions to the front to preserve sanity
    operand = batching.moveaxis(operand, operand_bdim, 0)
    start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)
    # Example: user code had start_indices shape (3, 4, 5), and we have to deal
    # with start_indices shape (7, 3, 4, 5). We transform that to a
    # start_indices of shape (7, 3, 4, 6) where we concatenated an iota that
    # counts along our batch dimension to the front of the ndindex.
    count_shape = list(start_indices.shape)
    count_shape[-1] = 1
    counts = broadcasted_iota(start_indices.dtype, tuple(count_shape), 0)
    start_indices = concatenate([counts, start_indices], len(count_shape) - 1)
    # The prepended iota indexes the batch axis, which is sliced one element
    # at a time and collapsed away.
    slice_sizes = (1,) + slice_sizes
    collapsed_slice_dims = (0,) + tuple(onp.add(1, dimension_numbers.collapsed_slice_dims))
    offset_dims = tuple(onp.add(1, dimension_numbers.offset_dims))
    start_index_map = (0,) + tuple(onp.add(1, dimension_numbers.start_index_map))
    dnums = GatherDimensionNumbers(
        offset_dims=offset_dims,
        collapsed_slice_dims=collapsed_slice_dims,
        start_index_map=start_index_map)
    return gather(operand, start_indices, dimension_numbers=dnums,
                  slice_sizes=slice_sizes), 0
# Register the gather primitive. The None in defjvp marks start_indices as
# non-differentiable.
gather_p = standard_primitive(
    _gather_shape_rule, _gather_dtype_rule, 'gather',
    _gather_translation_rule)
ad.defjvp(gather_p, _gather_jvp_rule, None)
ad.primitive_transposes[gather_p] = _gather_transpose_rule
batching.primitive_batchers[gather_p] = _gather_batching_rule
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
  """Converts a ScatterDimensionNumbers to the XLA client protocol form."""
  assert type(dimension_numbers) is ScatterDimensionNumbers
  proto = xla_client.ScatterDimensionNumbers()
  proto.update_window_dims.extend(dimension_numbers.update_window_dims)
  proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
  proto.scatter_dims_to_operand_dims.extend(
      dimension_numbers.scatter_dims_to_operand_dims)
  assert indices_shape.rank() > 0
  # The trailing axis of the indices array holds the index vector.
  proto.index_vector_dim = indices_shape.rank() - 1
  return proto
def _scatter_dtype_rule(operand, scatter_indices, updates, **kwargs):
  """Dtype rule shared by all scatter variants: indices integral, updates
  matching the operand; output keeps the operand's (canonicalized) dtype."""
  if not dtypes.issubdtype(scatter_indices.dtype, onp.integer):
    raise ValueError("scatter_indices must have an integer type")
  _check_same_dtypes("scatter", False, operand.dtype, updates.dtype)
  return dtypes.canonicalize_dtype(operand.dtype)
def _scatter_shape_rule(operand, scatter_indices, updates, **kwargs):
return operand.shape
def _scatter_translation_rule(c, operand, scatter_indices, updates,
                              update_jaxpr, update_consts, dimension_numbers):
  """XLA translation shared by all scatter variants: build the combiner
  subcomputation from the update jaxpr and emit xops.Scatter."""
  dtype = c.get_shape(operand).numpy_dtype()
  init_value = xb.constant(c, onp.array(0, dtype))
  update_computation = _reduction_computation(
      c, update_jaxpr, update_consts, init_value)
  indices_shape = c.get_shape(scatter_indices)
  return xops.Scatter(operand, scatter_indices, updates, update_computation,
                      _scatter_dimensions_proto(indices_shape, dimension_numbers),
                      False, False)
def _scatter_add_jvp(primals, tangents, *, update_jaxpr, update_consts,
                     dimension_numbers):
  """JVP rule: scatter-add is linear in operand and updates, so the tangent is
  a scatter-add of the tangents. Indices carry no tangent."""
  operand, scatter_indices, updates = primals
  g_operand, g_scatter_indices, g_updates = tangents
  val_out = scatter_add_p.bind(
      operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
      update_consts=update_consts, dimension_numbers=dimension_numbers)
  if g_operand is ad_util.zero and g_updates is ad_util.zero:
    tangent_out = ad_util.zero
  else:
    g_operand = ad.instantiate_zeros(operand, g_operand)
    g_updates = ad.instantiate_zeros(updates, g_updates)
    tangent_out = scatter_add_p.bind(
        g_operand, scatter_indices, g_updates, update_jaxpr=update_jaxpr,
        update_consts=update_consts, dimension_numbers=dimension_numbers)
  return val_out, tangent_out
def _scatter_add_transpose_rule(t, operand, scatter_indices, updates, *,
                                update_jaxpr, update_consts, dimension_numbers):
  """Transpose rule for scatter-add.

  Operand cotangent: the output cotangent passed through unchanged (the
  operand flows additively into the output). Updates cotangent: the inverse
  gather of the output cotangent at the scatter indices.
  """
  assert not ad.is_undefined_primal(scatter_indices)
  if ad.is_undefined_primal(updates):
    updates_shape = updates.aval.shape
  else:
    updates_shape = updates.shape
  if t is ad_util.zero:
    return [ad_util.zero, None, ad_util.zero]
  operand_t = update_t = None
  if ad.is_undefined_primal(operand):
    operand_t = t
  if ad.is_undefined_primal(updates):
    # Scatter window/inserted dims correspond to gather offset/collapsed dims.
    gather_dnums = GatherDimensionNumbers(
      offset_dims=dimension_numbers.update_window_dims,
      collapsed_slice_dims=dimension_numbers.inserted_window_dims,
      start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
    # Slice sizes: 1 along inserted dims, the update extent along window dims.
    slice_sizes = []
    pos = 0
    for i in range(len(t.shape)):
      if i in dimension_numbers.inserted_window_dims:
        slice_sizes.append(1)
      else:
        slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
        pos += 1
    update_t = gather(t, scatter_indices, dimension_numbers=gather_dnums,
                      slice_sizes=slice_sizes)
  return [operand_t, None, update_t]
def _scatter_mul_transpose_rule(t, operand, scatter_indices, updates, *,
                                update_jaxpr, update_consts, dimension_numbers):
  """Transpose rule for scatter-mul.

  Operand cotangent: scatter-mul of the output cotangent by the updates.
  Updates cotangent: the inverse gather of (cotangent * operand) at the
  scatter indices.
  """
  assert not ad.is_undefined_primal(scatter_indices)
  if ad.is_undefined_primal(updates):
    updates_shape = updates.aval.shape
  else:
    updates_shape = updates.shape
  if t is ad_util.zero:
    return [ad_util.zero, None, ad_util.zero]
  operand_t = update_t = None
  if ad.is_undefined_primal(operand):
    operand_t = scatter_mul(t, scatter_indices, updates,
                            dimension_numbers=dimension_numbers)
  if ad.is_undefined_primal(updates):
    # Same inverse-gather construction as in _scatter_add_transpose_rule.
    gather_dnums = GatherDimensionNumbers(
      offset_dims=dimension_numbers.update_window_dims,
      collapsed_slice_dims=dimension_numbers.inserted_window_dims,
      start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
    slice_sizes = []
    pos = 0
    for i in range(len(t.shape)):
      if i in dimension_numbers.inserted_window_dims:
        slice_sizes.append(1)
      else:
        slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
        pos += 1
    update_t = gather(mul(t, operand), scatter_indices,
                      dimension_numbers=gather_dnums, slice_sizes=slice_sizes)
  return [operand_t, None, update_t]
def _scatter_batching_rule(scatter_op, batched_args, batch_dims, *,
                           update_jaxpr, update_consts, dimension_numbers):
  """Batching rule shared by the scatter variants (partially applied with the
  user-facing `scatter_op`). Moves batch axes to the front and shifts the
  scatter dimension numbers by one to account for the batch axis."""
  operand, scatter_indices, updates = batched_args
  operand_bdim, scatter_indices_bdim, updates_bdim = batch_dims
  del update_jaxpr, update_consts  # Unused.
  # move the operand batch dim to the front if it is not None, otherwise create
  # it at the front (so that we can scatter into it)
  size = next(x.shape[ax] for x, ax in zip(batched_args, batch_dims)
              if ax is not None)
  operand = batching.bdim_at_front(operand, operand_bdim, size)
  operand_bdim = 0
  updates = batching.bdim_at_front(updates, updates_bdim, size)
  if scatter_indices_bdim is None:
    # Indices unbatched: the batch axis just becomes an extra window dim.
    inserted_window_dims = tuple(onp.add(1, dimension_numbers.inserted_window_dims))
    update_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.update_window_dims))
    scatter_dims_to_operand_dims = tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims))
    dnums = ScatterDimensionNumbers(
        update_window_dims=update_window_dims,
        inserted_window_dims=inserted_window_dims,
        scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
    return scatter_op(operand, scatter_indices, updates, dnums), 0
  # see the third case in _gather_batching_rule for comparison and comments
  scatter_indices = batching.bdim_at_front(
    scatter_indices, scatter_indices_bdim, size)
  count_shape = list(scatter_indices.shape)
  count_shape[-1] = 1
  counts = broadcasted_iota(scatter_indices.dtype, tuple(count_shape), 0)
  scatter_indices = concatenate([counts, scatter_indices],
                                len(count_shape) - 1)
  update_window_dims = tuple(onp.add(1, dimension_numbers.update_window_dims))
  inserted_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.inserted_window_dims))
  scatter_dims_to_operand_dims = (0,) + tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims))
  dnums = ScatterDimensionNumbers(
      update_window_dims=update_window_dims,
      inserted_window_dims=inserted_window_dims,
      scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
  return scatter_op(operand, scatter_indices, updates, dnums), 0
# Register the scatter-add and scatter-mul primitives.
scatter_add_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter-add',
    _scatter_translation_rule)
ad.primitive_jvps[scatter_add_p] = _scatter_add_jvp
ad.primitive_transposes[scatter_add_p] = _scatter_add_transpose_rule
batching.primitive_batchers[scatter_add_p] = (
    partial(_scatter_batching_rule, scatter_add))
scatter_mul_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter-mul',
    _scatter_translation_rule)
def _scatter_mul_jvp_rhs(g, x, i, y, *, dimension_numbers, **kw):
  """JVP of scatter-mul w.r.t. the updates: scatter-add the update tangents
  into zeros, then multiply elementwise by the operand."""
  return mul(x, scatter_add(zeros_like_array(x), i, g,
                            dimension_numbers=dimension_numbers))
# scatter-mul AD/vmap rules; the operand tangent is scatter-mul'd by the same
# updates, indices are non-differentiable (None).
ad.defjvp(scatter_mul_p,
          lambda g, x, i, y, **kw: scatter_mul_p.bind(g, i, y, **kw),
          None,
          _scatter_mul_jvp_rhs)
ad.primitive_transposes[scatter_mul_p] = _scatter_mul_transpose_rule
batching.primitive_batchers[scatter_mul_p] = (
    partial(_scatter_batching_rule, scatter_mul))
# TODO(jlebar): Add derivatives.
scatter_min_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter-min',
    _scatter_translation_rule)
batching.primitive_batchers[scatter_min_p] = (
    partial(_scatter_batching_rule, scatter_min))
# TODO(jlebar): Add derivatives.
scatter_max_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter-max',
    _scatter_translation_rule)
batching.primitive_batchers[scatter_max_p] = (
    partial(_scatter_batching_rule, scatter_max))
def _scatter_jvp(primals, tangents, *, update_jaxpr, update_consts,
                 dimension_numbers):
  """JVP rule for general scatter (arbitrary combiner)."""
  operand, scatter_indices, updates = primals
  g_operand, g_scatter_indices, g_updates = tangents
  dnums = dimension_numbers
  if g_operand is ad_util.zero and g_updates is ad_util.zero:
    val_out = scatter_p.bind(
      operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
      update_consts=update_consts, dimension_numbers=dnums)
    tangent_out = ad_util.zero
    return val_out, tangent_out
  g_operand = ad.instantiate_zeros(operand, g_operand)
  g_updates = ad.instantiate_zeros(updates, g_updates)
  # If there are overlapping indices in the scatter, it is unspecified which
  # update "wins". So we use the following perhaps surprising scheme:
  # a) attach a unique positive ID (an iota, along a new leading array
  #    dimension) to each update, and a zero ID to the operand, and scatter
  #    values and IDs together;
  # b) compute the inverse gather that "undoes" the scatter on the ID values;
  # c) use the scattered/gathered IDs to mask off the operand and update
  #    tangents that did not contribute to the primal output;
  # d) combine the masked tangents with a scatter-add.
  operand_shape = operand.shape
  updates_shape = updates.shape
  updates_dtype = _dtype(updates)
  # Stack the operand with a plane of zeros (the IDs for operand elements).
  new_operand = reshape(operand, (1,) + operand_shape)
  new_operand = pad(new_operand, _zero(operand),
                    ((0, 1, 0),) + tuple((0, 0, 0) for _ in operand_shape))
  # We specify the dtype here in case `updates_shape` is an empty tuple, in
  # which case numpy defaults to float64.
  ids_shape = onp.array(updates_shape, dtype=onp.int32)
  ids_shape[dnums.update_window_dims,] = 1
  num_ids = onp.prod(ids_shape)
  # IDs start at 1 so that 0 can mark "came from the operand".
  update_ids = add(reshape(iota(updates_dtype, num_ids), ids_shape),
                   _ones(updates))
  # TODO(phawkins): there is a potential bug here if the number of updates
  # is large enough to overflow the number of mantissa bits in a float so IDs
  # end up colliding. We could also utilize the exponent and sign bits, with a
  # little more work.
  assert num_ids < (2 ** dtypes.finfo(updates_dtype).nmant)
  updates = reshape(updates, (1,) + updates_shape)
  reshaped_update_ids = reshape(update_ids, (1,) + updates_shape)
  updates_and_ids = concatenate((updates, reshaped_update_ids), 0)
  # Shift all dnums by one for the new leading (value, id) dimension.
  new_dnums = ScatterDimensionNumbers(
    update_window_dims=(0,) + tuple(d + 1 for d in dnums.update_window_dims),
    inserted_window_dims=tuple(d + 1 for d in dnums.inserted_window_dims),
    scatter_dims_to_operand_dims=tuple(d + 1 for d in dnums.scatter_dims_to_operand_dims))
  outputs = scatter_p.bind(
    new_operand, scatter_indices, updates_and_ids, update_jaxpr=update_jaxpr,
    update_consts=update_consts, dimension_numbers=new_dnums)
  val_out = index_in_dim(outputs, 0, keepdims=False)
  scattered_ids = index_in_dim(outputs, 1, keepdims=False)
  # b) compute the inverse gather that "undoes" the scatter on the id values.
  gather_dnums = GatherDimensionNumbers(
    offset_dims=dnums.update_window_dims,
    collapsed_slice_dims=dnums.inserted_window_dims,
    start_index_map=dnums.scatter_dims_to_operand_dims)
  slice_sizes = []
  pos = 0
  for i in range(len(scattered_ids.shape)):
    if i in dnums.inserted_window_dims:
      slice_sizes.append(1)
    else:
      slice_sizes.append(updates_shape[dnums.update_window_dims[pos]])
      pos += 1
  gathered_update_ids = gather(scattered_ids, scatter_indices,
                               dimension_numbers=gather_dnums,
                               slice_sizes=slice_sizes)
  # c) mask off input JVP elements that do not correspond to a primal output.
  masked_g_operand = select(eq(scattered_ids, _zeros(scattered_ids)),
                            g_operand, _zeros(g_operand))
  masked_g_updates = select(eq(update_ids, gathered_update_ids),
                            g_updates, _zeros(g_updates))
  # d) perform a scatter-add to compute the tangent output.
  tangent_out = scatter_add(masked_g_operand, scatter_indices, masked_g_updates,
                            dimension_numbers=dnums)
  return val_out, tangent_out
# Register the general scatter primitive (arbitrary update computation).
scatter_p = standard_primitive(
    _scatter_shape_rule, _scatter_dtype_rule, 'scatter',
    _scatter_translation_rule)
ad.primitive_jvps[scatter_p] = _scatter_jvp
batching.primitive_batchers[scatter_p] = (
    partial(_scatter_batching_rule, scatter))
def _reduce_shape_rule(operand, init_value, *, computation, jaxpr, consts,
dimensions):
return tuple(onp.delete(operand.shape, dimensions))
def _reduce_translation_rule(c, operand, init_value, *, computation, jaxpr,
                             consts, dimensions):
  """XLA translation: build the reducer subcomputation and emit xops.Reduce."""
  xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
  return xops.Reduce(c, [operand], [init_value], xla_computation, dimensions)
def _reduce_batch_rule(batched_args, batch_dims, *, computation, jaxpr, consts,
                       dimensions):
  """Batching rule for reduce (currently only for an unbatched init_value)."""
  operand, init_value = batched_args
  operand_bdim, init_value_bdim = batch_dims
  if init_value_bdim is None:
    assert operand_bdim is not None
    # Reduced axes at or after the batch axis shift right by one; the output
    # batch axis shifts left by the number of reduced axes before it.
    new_dimensions = [d + bool(d >= operand_bdim) for d in dimensions]
    new_operand_bdim = operand_bdim - int(onp.sum(onp.less(dimensions, operand_bdim)))
    return reduce(operand, init_value, computation, new_dimensions), new_operand_bdim
  else:
    raise NotImplementedError  # loop and stack
def _reduction_computation(c, jaxpr, consts, init_value):
  """Builds an XLA subcomputation for a binary reducer from its jaxpr."""
  shape = c.get_shape(init_value)
  axis_env = xla.AxisEnv(1)  # no parallel primitives inside reductions
  subc = xla_bridge.make_computation_builder("reduction_computation")
  assert len(consts) == 0, "Reduction computations cannot have constants"
  # The reducer is a binary function of two scalars of the init_value's shape.
  args = [xb.parameter(subc, 0, shape), xb.parameter(subc, 1, shape)]
  out, = xla.jaxpr_subcomp(subc, jaxpr, None, axis_env, consts, '', *args)
  return subc.build(out)
def _masking_defreducer(prim, identity):
  """Registers a masking rule for a reducer primitive given its identity fn."""
  masking.masking_rules[prim] = partial(_reducer_masking_rule, prim, identity)
def _reducer_masking_rule(prim, identity, padded_vals, logical_shapes,
                          axes):
  """Masking rule: replace padding with the reduction identity, then reduce."""
  (padded_val,), (logical_shape,) = padded_vals, logical_shapes
  padded_shape = masking.padded_shape_as_value(padded_val.shape)
  # Positions at or past the logical extent along any reduced axis are padding.
  masks = [broadcasted_iota(onp.int32, padded_shape, i) < d
           for i, d in enumerate(logical_shape) if i in axes]
  mask = _reduce(operator.and_, masks)
  masked_val = select(mask, padded_val, identity(padded_shape, padded_val.dtype))
  return prim.bind(masked_val, axes=axes)
# Register the general (user-combiner) reduce primitive.
reduce_p = standard_primitive(_reduce_shape_rule, _input_dtype, 'reduce',
                              _reduce_translation_rule)
batching.primitive_batchers[reduce_p] = _reduce_batch_rule
def _reduce_number_dtype_rule(name, operand, *args, **kw):
  """Dtype rule for numeric reducers: operand must be a number dtype; the
  output keeps the operand's (canonicalized) dtype."""
  if not dtypes.issubdtype(operand.dtype, onp.number):
    raise TypeError("{} does not accept dtype {}. Accepted dtypes are subtypes "
                    "of number.".format(name, onp.dtype(operand.dtype).name))
  return dtypes.canonicalize_dtype(operand.dtype)
def _reduce_sum_shape_rule(operand, *, axes):
  """reduce_sum uses the common reduce-op shape rule (drop the reduced axes)."""
  return _reduce_op_shape_rule(operand, axes=axes)
def _reduce_sum_translation_rule(c, operand, *, axes):
  """XLA translation: reduce with a scalar add combiner and identity 0."""
  dtype = c.get_shape(operand).numpy_dtype()
  scalar = ShapedArray((), dtype)
  return xops.Reduce(c, [operand], [xb.constant(c, onp.array(0, dtype))],
                     xla.primitive_subcomputation(add_p, scalar, scalar),
                     axes)
def _reduce_sum_transpose_rule(cotangent, operand, *, axes):
  """Transpose of sum-reduction: broadcast the cotangent across the summed
  axes back to the operand's shape."""
  assert ad.is_undefined_primal(operand)
  input_shape = operand.aval.shape
  # The kept (non-reduced) axes map one-to-one onto the cotangent's axes.
  broadcast_dimensions = tuple(onp.delete(onp.arange(len(input_shape)), axes))
  result = broadcast_in_dim(cotangent, input_shape, broadcast_dimensions)
  assert result.shape == input_shape
  return [result]
# Register reduce_sum; it is linear, and its masking identity is zeros.
reduce_sum_p = standard_primitive(
  _reduce_sum_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_sum'),
  'reduce_sum', _reduce_sum_translation_rule)
ad.deflinear2(reduce_sum_p, _reduce_sum_transpose_rule)
batching.defreducer(reduce_sum_p)
_masking_defreducer(reduce_sum_p,
                    lambda shape, dtype: onp.broadcast_to(onp.array(0, dtype), shape))
def _reduce_op_shape_rule(operand, *, axes):
return tuple(onp.delete(operand.shape, axes))
def _reduce_prod_translation_rule(c, operand, *, axes):
  """XLA translation: reduce with a scalar mul combiner and identity 1."""
  dtype = c.get_shape(operand).numpy_dtype()
  scalar = ShapedArray((), dtype)
  return xops.Reduce(c, [operand], [xb.constant(c, onp.array(1, dtype))],
                     xla.primitive_subcomputation(mul_p, scalar, scalar), axes)
def _reduce_prod_jvp_rule(primals, tangents, *, axes):
  """JVP rule for reduce_prod.

  Rewrites the product as a balanced tree of pairwise multiplies over the
  flattened reduced axes, then differentiates that tree with api.jvp. The
  tree form keeps the JVP well-defined even when factors are zero.
  """
  operand, = primals
  tangent, = tangents
  input_shape = onp.array(operand.shape)
  n = onp.prod(input_shape[list(axes)])
  non_axes = onp.delete(onp.arange(len(input_shape)), axes)
  # Move the reduced axes to the front, and flatten them to 1D.
  permutation = axes + tuple(non_axes)
  new_shape = (n,) + tuple(input_shape[non_axes])
  operand = reshape(operand, new_shape, permutation)
  tangent = reshape(tangent, new_shape, permutation)
  def _reduce_prod_tree(x, axis=0):
    """Halve the leading axis by pairwise multiplication until it is 1."""
    while x.shape[axis] > 1:
      n = x.shape[axis]
      n1 = (n + 1) // 2
      n2 = n - n1
      x1 = slice_in_dim(x, 0, n1)
      x2 = slice_in_dim(x, n1, None)
      if n2 != n1:
        # Odd extent: pad the short half with the multiplicative identity.
        paddings = [(0, 0, 0)] * len(x.shape)
        paddings[axis] = (0, 1, 0)
        x2 = pad(x2, _const(x, 1), paddings)
      x = x1 * x2
    shape = list(x.shape)
    del shape[axis]
    return reshape(x, shape)
  return api.jvp(_reduce_prod_tree, (operand,), (tangent,))
# Register reduce_prod with its custom JVP (see _reduce_prod_jvp_rule).
reduce_prod_p = standard_primitive(
  _reduce_op_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_prod'),
  'reduce_prod', _reduce_prod_translation_rule)
ad.primitive_jvps[reduce_prod_p] = _reduce_prod_jvp_rule
batching.defreducer(reduce_prod_p)
def _reduce_chooser_shape_rule(operand, *, axes):
return tuple(onp.delete(operand.shape, axes))
def _reduce_chooser_translation_rule(prim, identity, c, operand, *, axes):
  """XLA translation shared by min/max reductions; partially applied with the
  scalar combiner primitive and its identity function."""
  dtype = c.get_shape(operand).numpy_dtype()
  scalar = ShapedArray((), dtype)
  return xops.Reduce(c, [operand], [xb.constant(c, identity(dtype))],
                     xla.primitive_subcomputation(prim, scalar, scalar), axes)
def _reduce_chooser_jvp_rule(g, ans, operand, *, axes):
  """JVP rule shared by min/max reductions: propagate the tangents of the
  chosen elements; ties split the tangent evenly among the tied locations."""
  # TODO(mattjj): an alternative is to use variadic reduce to compute the chosen
  # locations in a single pass (rather than comparing equality) and use a
  # gather, and/or even push along the chosen elements of g (b/112040122)
  shape = [1 if i in axes else d for i, d in enumerate(operand.shape)]
  location_indicators = convert_element_type(
      _eq_meet(operand, reshape(ans, shape)), g.dtype)
  counts = _reduce_sum(location_indicators, axes)
  return div(_reduce_sum(mul(g, location_indicators), axes), counts)
# Register reduce_max and reduce_min; both share the chooser JVP rule.
_reduce_max_translation_rule = partial(_reduce_chooser_translation_rule, max_p,
                                       _get_max_identity)
reduce_max_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
                                  'reduce_max', _reduce_max_translation_rule)
ad.defjvp2(reduce_max_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_max_p)
_reduce_min_translation_rule = partial(
    _reduce_chooser_translation_rule, min_p, _get_min_identity)
reduce_min_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
                                  'reduce_min', _reduce_min_translation_rule)
ad.defjvp2(reduce_min_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_min_p)
def _reduce_logical_shape_rule(operand, *, axes):
if operand.dtype != onp.bool_:
msg = "logical reduction requires operand dtype bool, got {}."
raise TypeError(msg.format(operand.dtype))
return tuple(onp.delete(operand.shape, axes))
def _reduce_logical_translation_rule(prim, identity, c, operand, *, axes):
  """XLA translation shared by and/or reductions; partially applied with the
  scalar combiner primitive and its identity function."""
  scalar = ShapedArray((), onp.bool_)
  return xops.Reduce(c, [operand], [xb.constant(c, identity(onp.bool_))],
                     xla.primitive_subcomputation(prim, scalar, scalar), axes)
# Register reduce_or/reduce_and. For bools the max identity is False (identity
# of `or`) and the min identity is True (identity of `and`).
_reduce_or_translation_rule = partial(_reduce_logical_translation_rule,
                                      or_p, _get_max_identity)
reduce_or_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(onp.bool_),
                                 'reduce_or', _reduce_or_translation_rule)
batching.defreducer(reduce_or_p)
_reduce_and_translation_rule = partial(_reduce_logical_translation_rule,
                                       and_p, _get_min_identity)
reduce_and_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(onp.bool_),
                                  'reduce_and', _reduce_and_translation_rule)
batching.defreducer(reduce_and_p)
def _reduce_window_shape_rule(operand, init_value, *, jaxpr, consts,
                              window_dimensions, window_strides, padding):
  """Shape rule for reduce_window: operand/init dtypes must agree; delegates
  the window arithmetic to the common reduce-window shape rule."""
  if operand.dtype != init_value.dtype:
    msg = ("reduce_window got inconsistent dtypes for operand and init_value: "
           " got operand dtype {} and init_value dtype {}.")
    raise TypeError(msg.format(operand.dtype, init_value.dtype))
  return _common_reduce_window_shape_rule(operand, window_dimensions,
                                          window_strides, padding)
def _reduce_window_translation_rule(c, operand, init_value, *, jaxpr, consts,
                                    window_dimensions, window_strides, padding):
  """XLA translation: build the combiner subcomputation, resolve the padding
  config, and emit xops.ReduceWindowWithGeneralPadding."""
  xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
  pads = xc.window_padding_type_to_pad_values(
      padding, c.get_shape(operand).dimensions(), window_dimensions,
      window_strides)
  return xops.ReduceWindowWithGeneralPadding(
    operand, init_value, xla_computation, window_dimensions,
    window_strides, (), (), pads)
def _generic_reduce_window_batch_rule(
    batched_args, batch_dims, *, jaxpr, consts, window_dimensions,
    window_strides, padding):
  """Batching rule for the generic reduce_window primitive; delegates the
  window bookkeeping to _reduce_window_batch_rule."""
  operand, init = batched_args
  bdim, init_bdim = batch_dims
  if init_bdim is not None:
    raise NotImplementedError("reduce_window batching is not implemented for "
                              "initial values")
  # Closure that re-binds the primitive with the (unbatched) init value.
  def reduce_window(x, window_dimensions, window_strides, padding):
    return reduce_window_p.bind(
      x, init, jaxpr=jaxpr, consts=consts, window_dimensions=window_dimensions,
      window_strides=window_strides, padding=padding)
  return _reduce_window_batch_rule(reduce_window, (operand,), (bdim,),
                                   window_dimensions, window_strides, padding)
# Register the generic (user-combiner) reduce_window primitive.
reduce_window_p = standard_primitive(
    _reduce_window_shape_rule, _input_dtype, 'reduce_window',
    _reduce_window_translation_rule)
batching.primitive_batchers[reduce_window_p] = _generic_reduce_window_batch_rule
def _reduce_window_sum_shape_rule(operand, *, window_dimensions, window_strides,
                                  padding):
  """Shape rule for reduce_window_sum: numeric operand required; delegates
  the window arithmetic to the common reduce-window shape rule."""
  if not dtypes.issubdtype(operand.dtype, onp.number):
    msg = "operand to reduce_window_sum must have a number dtype, got {}"
    raise TypeError(msg.format(onp.dtype(operand.dtype).name))
  return _common_reduce_window_shape_rule(operand, window_dimensions,
                                          window_strides, padding)
def _reduce_window_sum_translation_rule(c, operand, *, window_dimensions,
                                        window_strides, padding):
  """XLA translation: windowed reduce with a scalar add combiner, identity 0."""
  dtype = c.get_shape(operand).numpy_dtype()
  scalar = ShapedArray((), dtype)
  pads = xc.window_padding_type_to_pad_values(
      padding, c.get_shape(operand).dimensions(), window_dimensions,
      window_strides)
  return xops.ReduceWindowWithGeneralPadding(
    operand, xb.constant(c, onp.array(0, dtype)),
    xla.primitive_subcomputation(add_p, scalar, scalar), window_dimensions,
    window_strides, (), (), pads)
def _reduce_window_sum_transpose_rule(cotangent, operand, *, window_dimensions,
                                      window_strides, padding):
  """Transpose rule for reduce_window_sum.

  The transpose of a windowed sum is another windowed sum over the
  appropriately padded/dilated cotangent; the padding computation reuses the
  convolution LHS-VJP padding helper with unit window/dilation factors.
  """
  assert ad.is_undefined_primal(operand)
  input_shape = operand.aval.shape
  in_pads = padtype_to_pads(input_shape, window_dimensions, window_strides,
                            padding)
  ones = [1] * len(input_shape)
  pads = _conv_general_vjp_lhs_padding(
      input_shape, window_dimensions, window_strides, cotangent.shape, in_pads,
      ones, ones)
  # Interior padding of stride-1 "undoes" the stride of the forward reduction.
  padding_config = [(lo, hi, stride - 1)
                    for (lo, hi), stride in zip(pads, window_strides)]
  pad_cotangent = pad(cotangent, _zero(cotangent), padding_config)
  result = _reduce_window_sum(pad_cotangent, window_dimensions, ones,
                              xla_client.PaddingType.VALID)
  assert result.shape == input_shape
  return [result]
def _reduce_window_batch_rule(reduce_window, batched_args, bdims, *,
                              window_dimensions, window_strides, padding):
  """Shared batching rule for windowed reductions.

  Inserts a size-1, stride-1 window at the batch dimension so the batch axis
  passes through the reduction untouched.
  """
  operand, = batched_args
  bdim, = bdims

  if bdim is not None:
    window_dimensions = \
        window_dimensions[:bdim] + (1,) + window_dimensions[bdim:]
    window_strides = window_strides[:bdim] + (1,) + window_strides[bdim:]

  operand = reduce_window(
      operand, window_dimensions, window_strides, padding)

  return operand, bdim
# Primitive registration: reduce_window_sum (linear, so it gets a transpose).
reduce_window_sum_p = standard_primitive(
    _reduce_window_sum_shape_rule, _input_dtype, 'reduce_window_sum',
    _reduce_window_sum_translation_rule)
ad.deflinear2(reduce_window_sum_p, _reduce_window_sum_transpose_rule)
batching.primitive_batchers[reduce_window_sum_p] = partial(
    _reduce_window_batch_rule, _reduce_window_sum)
def _reduce_window_chooser_translation_rule(
    prim, identity, c, operand, *, window_dimensions, window_strides, padding):
  """XLA translation shared by reduce_window_max/min.

  `prim` is the binary choosing primitive (max_p/min_p) and `identity` maps a
  dtype to that monoid's identity element.
  """
  dtype = c.get_shape(operand).numpy_dtype()
  scalar = ShapedArray((), dtype)
  pads = xc.window_padding_type_to_pad_values(
      padding, c.get_shape(operand).dimensions(), window_dimensions,
      window_strides)
  return xops.ReduceWindowWithGeneralPadding(
      operand, xb.constant(c, identity(dtype)),
      xla.primitive_subcomputation(prim, scalar, scalar), window_dimensions,
      window_strides, (), (), pads)
def _reduce_window_chooser_jvp_rule(prim, g, operand, *, window_dimensions,
                                    window_strides, padding):
  """JVP shared by reduce_window_max/min: gather the tangent of each
  window's chosen (max/min) element via select_and_gather_add."""
  assert prim is max_p or prim is min_p
  select_prim = ge_p if prim is max_p else le_p
  return _select_and_gather_add(g, operand, select_prim, window_dimensions,
                                window_strides, padding)
def _common_reduce_window_shape_rule(operand, window_dimensions,
                                     window_strides, padding):
  """Validate window parameters and return the windowed-reduction out shape."""
  _check_shapelike("reduce_window", "window_dimensions", window_dimensions)
  _check_shapelike("reduce_window", "window_strides", window_strides)
  if operand.ndim != len(window_dimensions):
    msg = ("reduce_window got the wrong number of window_dimensions for "
           "operand: got operand shape {} with window_dimensions {}.")
    raise TypeError(msg.format(operand.shape, window_dimensions))
  if len(window_strides) != len(window_dimensions):
    msg = ("reduce_window got inconsistent window_strides and "
           "window_dimensions: got window_strides {} and window_dimensions {}.")
    raise TypeError(msg.format(window_strides, window_dimensions))
  return reduce_window_shape_tuple(operand.shape, window_dimensions,
                                   window_strides, padding)
def reduce_window_shape_tuple(operand_shape, window_dimensions, window_strides,
                              padding):
  """Compute the output shape of a windowed reduction.

  Pads `operand_shape` per `padding`, then counts how many strided window
  placements fit in each dimension.
  """
  pads = padtype_to_pads(operand_shape, window_dimensions, window_strides,
                         padding)
  pads_lo, pads_hi = zip(*pads)
  padded = onp.add(onp.add(operand_shape, pads_lo), pads_hi)
  room = onp.subtract(padded, window_dimensions)
  return tuple(onp.floor_divide(room, window_strides) + 1)
# Primitive registrations: reduce_window_max / reduce_window_min.
_reduce_window_max_translation_rule = partial(
    _reduce_window_chooser_translation_rule, max_p, _get_max_identity)
reduce_window_max_p = standard_primitive(
    _common_reduce_window_shape_rule, _input_dtype, 'reduce_window_max',
    _reduce_window_max_translation_rule)
ad.defjvp(reduce_window_max_p, partial(_reduce_window_chooser_jvp_rule, max_p))
batching.primitive_batchers[reduce_window_max_p] = partial(
    _reduce_window_batch_rule, _reduce_window_max)

_reduce_window_min_translation_rule = partial(
    _reduce_window_chooser_translation_rule, min_p, _get_min_identity)
reduce_window_min_p = standard_primitive(
    _common_reduce_window_shape_rule, _input_dtype, 'reduce_window_min',
    _reduce_window_min_translation_rule)
ad.defjvp(reduce_window_min_p, partial(_reduce_window_chooser_jvp_rule, min_p))

# NOTE(review): `_reduce_window_min_batch_rule` is bound but the registration
# below builds an equivalent fresh partial; the name appears redundant.
_reduce_window_min_batch_rule = partial(_reduce_window_batch_rule,
                                        _reduce_window_min)
batching.primitive_batchers[reduce_window_min_p] = partial(
    _reduce_window_batch_rule, _reduce_window_min)
def _select_and_scatter_shape_rule(
    operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,
    scatter_consts, window_dimensions, window_strides, padding):
  """Shape rule for select_and_scatter: output has the operand's shape."""
  _check_shapelike("select_and_scatter", "window_dimensions", window_dimensions)
  _check_shapelike("select_and_scatter", "window_strides", window_strides)
  if len(window_dimensions) != len(window_strides):
    msg = ("select_and_scatter got inconsistent window_strides and "
           "window_dimensions: got window_strides {} and window_dimensions {}.")
    raise TypeError(msg.format(window_strides, window_dimensions))
  return operand.shape
def _select_and_scatter_translation(
  c, operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,
  scatter_consts, window_dimensions, window_strides, padding):
  """XLA translation for select_and_scatter: lowers the select and scatter
  jaxprs to subcomputations and emits SelectAndScatterWithGeneralPadding."""
  select = _reduction_computation(c, select_jaxpr, select_consts, init_value)
  scatter = _reduction_computation(c, scatter_jaxpr, scatter_consts, init_value)
  pads = xc.window_padding_type_to_pad_values(
      padding, c.get_shape(operand).dimensions(), window_dimensions,
      window_strides)
  return xops.SelectAndScatterWithGeneralPadding(
      operand, select, window_dimensions, window_strides, pads, source,
      init_value, scatter)
# Primitive registration: generic select_and_scatter.
select_and_scatter_p = standard_primitive(
    _select_and_scatter_shape_rule, _input_dtype, 'select_and_scatter',
    _select_and_scatter_translation)
def _select_and_scatter_add_shape_rule(
source, operand, *, select_prim, window_dimensions, window_strides,
padding):
return operand.shape
def _select_and_scatter_add_translation(
    c, source, operand, *, select_prim, window_dimensions, window_strides,
    padding):
  """XLA translation for select_and_scatter_add: select via `select_prim`,
  scatter with `add`, initialized to zero."""
  dtype = c.get_shape(operand).numpy_dtype()
  scalar = ShapedArray((), dtype)
  select = xla.primitive_subcomputation(select_prim, scalar, scalar)
  scatter = xla.primitive_subcomputation(add_p, scalar, scalar)
  zero = xb.constant(c, onp.array(0, dtype))
  pads = xc.window_padding_type_to_pad_values(
      padding, c.get_shape(operand).dimensions(), window_dimensions,
      window_strides)
  return xops.SelectAndScatterWithGeneralPadding(
      operand, select, window_dimensions, window_strides, pads, source, zero,
      scatter)
def _select_and_scatter_add_jvp(
    primals, tangents, *, select_prim, window_dimensions, window_strides,
    padding):
  """JVP for select_and_scatter_add.

  The output is linear in `source`, and the operand only determines which
  window positions are selected, so the operand tangent is discarded.
  """
  source, operand = primals
  g_source, g_operand = tangents
  val_out = _select_and_scatter_add(
      source, operand, select_prim, window_dimensions, window_strides,
      padding)
  del g_operand
  if g_source is ad_util.zero:
    tangent_out = ad_util.zero
  else:
    tangent_out = _select_and_scatter_add(
        g_source, operand, select_prim, window_dimensions,
        window_strides, padding)
  return val_out, tangent_out
def _select_and_scatter_add_transpose(
    t, source, operand, *, select_prim, window_dimensions, window_strides,
    padding):
  """Transpose rule: the transpose of scatter-add at selected positions is a
  gather-add from the same positions (select_and_gather_add)."""
  assert ad.is_undefined_primal(source) and not ad.is_undefined_primal(operand)
  source_t = _select_and_gather_add(t, operand, select_prim, window_dimensions,
                                    window_strides, padding)
  return [source_t, None]
def _select_and_scatter_add_batch_rule(batched_args, batch_dims, **kwargs):
  """Batching rule for select_and_scatter_add.

  The primitive is applied per-example by unrolling over the batch axis and
  stacking the per-example results along a new leading axis.
  TODO(#212): use a map construct instead of unrolling.
  """
  source, operand = batched_args
  s_bdims, o_bdims = batch_dims

  def _stack(results):
    # Stack per-example results along a new leading (batch) axis.
    results = [reshape(out, (1,) + out.shape) for out in results]
    return concatenate(results, 0), 0

  if s_bdims is not None and o_bdims is not None:
    source = batching.moveaxis(source, s_bdims, 0)
    operand = batching.moveaxis(operand, o_bdims, 0)
    return _stack([_select_and_scatter_add(s, o, **kwargs)
                   for s, o in zip(source, operand)])
  elif s_bdims is not None:
    source = batching.moveaxis(source, s_bdims, 0)
    return _stack([_select_and_scatter_add(s, operand, **kwargs)
                   for s in source])
  elif o_bdims is not None:
    operand = batching.moveaxis(operand, o_bdims, 0)
    return _stack([_select_and_scatter_add(source, o, **kwargs)
                   for o in operand])
# Primitive registration: select_and_scatter_add.
select_and_scatter_add_p = standard_primitive(
    _select_and_scatter_add_shape_rule, _input_dtype, 'select_and_scatter_add',
    _select_and_scatter_add_translation)
ad.primitive_transposes[select_and_scatter_add_p] = \
    _select_and_scatter_add_transpose
ad.primitive_jvps[select_and_scatter_add_p] = _select_and_scatter_add_jvp
batching.primitive_batchers[select_and_scatter_add_p] = \
    _select_and_scatter_add_batch_rule
def _select_and_gather_add_shape_rule(
    tangents, operand, *, select_prim, window_dimensions, window_strides,
    padding):
  """Shape rule for select_and_gather_add: output shape matches a windowed
  reduction of `operand`; `tangents` must match `operand` elementwise."""
  if tangents.shape != operand.shape:
    msg = ("select_and_gather_add tangents and operand shapes must match, "
           "got {} and {}.")
    raise TypeError(msg.format(tangents.shape, operand.shape))
  return _common_reduce_window_shape_rule(operand, window_dimensions,
                                          window_strides, padding)
# Bit width -> matching unsigned integer numpy dtype.
_UINT_DTYPES = {
  16: onp.uint16,
  32: onp.uint32,
  64: onp.uint64,
}

# Bit width -> matching signed integer numpy dtype.
_INT_DTYPES = {
  16: onp.int16,
  32: onp.int32,
  64: onp.int64,
}
def _select_and_gather_add_translation(
    c, tangents, operand, *, select_prim, window_dimensions, window_strides,
    padding, max_bits=64):
  """XLA translation for select_and_gather_add.

  Implements a pairwise windowed reduction over (operand, tangent) pairs by
  packing both values into a single integer word, reducing, and unpacking the
  tangent of each window's selected element.
  """
  shape = c.get_shape(operand)
  dtype = shape.numpy_dtype()
  etype = shape.xla_element_type()
  nbits = dtypes.finfo(dtype).bits

  assert nbits <= max_bits
  double_word_reduction = nbits * 2 <= max_bits

  const = lambda c, dtype, x: xb.constant(c, onp.array(x, dtype=dtype),
                                          canonicalize_types=False)

  if double_word_reduction:
    # TODO(b/73062247): XLA doesn't yet implement ReduceWindow on tuples, so
    # we implement a pairwise ReduceWindow by packing the two nbits-wide
    # values into a single 2*nbits-wide unsigned integer.
    word_dtype = _UINT_DTYPES[nbits]
    double_word_dtype = _UINT_DTYPES[nbits * 2]
    word_type = xla_client.dtype_to_etype(word_dtype)
    double_word_type = xla_client.dtype_to_etype(double_word_dtype)

    # Packs a (value, tangent) pair: value in the high bits, tangent low.
    def pack(a, b):
      a = xops.BitcastConvertType(a, word_type)
      b = xops.BitcastConvertType(b, word_type)
      a = xops.ConvertElementType(a, double_word_type)
      b = xops.ConvertElementType(b, double_word_type)
      a = xops.ShiftLeft(a, const(c, double_word_dtype, nbits))
      return xops.Or(a, b)

    # Unpacks the first (high-bits) element back to the original float type.
    def fst(c, t):
      st = xops.ShiftRightLogical(t, const(c, double_word_dtype, nbits))
      return xops.BitcastConvertType(xops.ConvertElementType(st, word_type), etype)

    # Unpacks the second (low-bits) element.
    def snd(t):
      return xops.BitcastConvertType(xops.ConvertElementType(t, word_type), etype)

  else:
    # No wider integer type is available, so pack two reduced-precision half
    # words into one word at the cost of mantissa bits.
    warnings.warn("Using reduced precision for gradient of reduce-window "
                  "min/max operator to work around missing XLA support for "
                  "pair-reductions. This is likely from a second or "
                  "higher derivative of a max-pooling operation.")
    r_nbits = nbits // 2
    # Drop the lower mantissa bits to fit each value into r_nbits.
    nexp = dtypes.finfo(dtype).nexp
    nmant = r_nbits - nexp - 1

    double_word_dtype = word_dtype = _UINT_DTYPES[nbits]
    word_type = xla_client.dtype_to_etype(word_dtype)

    # Packs a (value, tangent) pair after rounding both to r_nbits precision.
    def pack(a, b):
      a = xops.ReducePrecision(a, exponent_bits=nexp, mantissa_bits=nmant)
      b = xops.ReducePrecision(b, exponent_bits=nexp, mantissa_bits=nmant)
      a = xops.BitcastConvertType(a, word_type)
      b = xops.BitcastConvertType(b, word_type)
      b = xops.ShiftRightLogical(b, const(c, word_dtype, r_nbits))
      return xops.Or(a, b)

    # Unpacks the first element (high half, with low half masked off).
    def fst(c, t):
      st = xops.And(t, const(c, word_dtype, ((1 << r_nbits) - 1) << r_nbits))
      return xops.BitcastConvertType(st, etype)

    # Unpacks the second element (low half shifted back up).
    def snd(t):
      return xops.BitcastConvertType(xops.ShiftLeft(t, const(c, word_dtype, r_nbits)),
                                     etype)

  def reducer():
    # Builds the pairwise reducer: compare on the packed values' first
    # components and keep the whole winning pair.
    c = xla_bridge.make_computation_builder("select_and_gather_pair_reducer")
    x = xb.parameter(c, 0,
      xla_client.Shape.array_shape(onp.dtype(double_word_dtype), ()))
    y = xb.parameter(c, 1,
      xla_client.Shape.array_shape(onp.dtype(double_word_dtype), ()))
    assert select_prim is ge_p or select_prim is le_p
    which = xops.Ge if select_prim is ge_p else xops.Le
    xops.Select(which(fst(c, x), fst(c, y)), x, y)
    return c.build()

  assert select_prim is ge_p or select_prim is le_p, select_prim
  init = -onp.inf if select_prim is ge_p else onp.inf
  pads = xc.window_padding_type_to_pad_values(
      padding, c.get_shape(operand).dimensions(), window_dimensions,
      window_strides)
  out = xops.ReduceWindowWithGeneralPadding(
      pack(operand, tangents), pack(const(c, dtype, init), const(c, dtype, 0)),
      reducer(), window_dimensions, window_strides, (), (), pads)
  return snd(out)
def _select_and_gather_add_jvp(
    primals, tangents, *, select_prim, window_dimensions, window_strides,
    padding):
  """JVP for select_and_gather_add: linear in the first argument; the operand
  tangent is discarded since the operand only drives the selection."""
  source, operand = primals
  g_source, g_operand = tangents
  val_out = _select_and_gather_add(
      source, operand, select_prim, window_dimensions, window_strides,
      padding)
  del g_operand
  if g_source is ad_util.zero:
    tangent_out = ad_util.zero
  else:
    tangent_out = _select_and_gather_add(
        g_source, operand, select_prim, window_dimensions,
        window_strides, padding)
  return val_out, tangent_out
def _select_and_gather_add_transpose(
    t, tangents, operand, *, select_prim, window_dimensions, window_strides,
    padding):
  """Transpose rule: the transpose of gather-add at selected positions is a
  scatter-add back to those positions (select_and_scatter_add)."""
  assert ad.is_undefined_primal(tangents) and not ad.is_undefined_primal(operand)
  result = _select_and_scatter_add(t, operand, select_prim, window_dimensions,
                                   window_strides, padding)
  return [result, None]
def _select_and_gather_add_batching_rule(
    batched_args, batch_dims, *, select_prim, window_dimensions, window_strides,
    padding):
  """Batching rule: move batch axes to the front and prepend a size-1,
  stride-1 window dimension so the batch axis passes through unchanged."""
  t, x = batched_args
  t_bdim, x_bdim = batch_dims
  size = next(a.shape[bdim] for a, bdim in zip(batched_args, batch_dims)
              if bdim is not None)
  t = batching.bdim_at_front(t, t_bdim, size)
  x = batching.bdim_at_front(x, x_bdim, size)
  window_dimensions = (1,) + window_dimensions
  window_strides = (1,) + window_strides
  out = _select_and_gather_add(t, x, select_prim, window_dimensions,
                               window_strides, padding)
  return (out, 0)
# Primitive registration: select_and_gather_add. The TPU lowering limits the
# packed word width to 32 bits.
select_and_gather_add_p = standard_primitive(
    _select_and_gather_add_shape_rule, _input_dtype, 'select_and_gather_add',
    _select_and_gather_add_translation)
ad.primitive_jvps[select_and_gather_add_p] = _select_and_gather_add_jvp
ad.primitive_transposes[select_and_gather_add_p] = \
    _select_and_gather_add_transpose
batching.primitive_batchers[select_and_gather_add_p] = \
    _select_and_gather_add_batching_rule
xla.backend_specific_translations['tpu'][select_and_gather_add_p] = partial(
    _select_and_gather_add_translation,
    max_bits=32)
def _prescan_power_of_two(x, axis: int, op: Callable, unit):
  """Exclusive parallel prefix scan along `axis` (length must be a power of 2).

  Work-efficient Blelloch-style scan: an up-sweep that pairwise-combines
  elements, then a down-sweep that redistributes partial results. Returns the
  exclusive scan and the total reduction.
  """
  n = x.shape[axis]
  assert n != 0 and n & (n - 1) == 0, "n must be a power of 2"

  # Up sweep: halve the axis each round, keeping the partial sums.
  xs = []
  for d in range(0, n.bit_length() - 1):
    x1 = slice_in_dim(x, 0, None, stride=2, axis=axis)
    xs.append(x1)
    x2 = slice_in_dim(x, 1, None, stride=2, axis=axis)
    x = op(x1, x2)
  total = x

  # Down sweep: start from the identity and interleave the saved partials.
  x = full_like(total, unit)
  pad_left = [(0, 0, 0)] * len(x.shape)
  pad_left[axis] = (1, 0, 1)
  pad_right = [(0, 0, 0)] * len(x.shape)
  pad_right[axis] = (0, 1, 1)
  for w in reversed(xs):
    x1 = pad(x, _const(x, 0), pad_right)
    x2 = pad(x, _const(x, 0), pad_left)
    w = pad(w, _const(x, 0), pad_left)
    x = x1 + op(x2, w)

  return x, total
def _parallel_prefix_scan(x, axis: int, op: Callable, unit):
  """Inclusive prefix scan along `axis` for arbitrary lengths.

  Pads the axis up to the next power of two with the identity, runs the
  power-of-two exclusive prescan, then shifts by one and appends the total to
  make the result inclusive.
  """
  n = x.shape[axis]
  if n == 0:
    return x
  # Pads to the next largest power of two
  nbits = n.bit_length()
  if n == (1 << (nbits - 1)):
    nbits -= 1
  padding = [(0, 0, 0)] * len(x.shape)
  padding[axis] = (0, (1 << nbits) - n, 0)
  x = pad(x, _const(x, unit), padding)
  x, total = _prescan_power_of_two(x, axis, op, unit)
  return concatenate((slice_in_dim(x, 1, n, axis=axis), total), dimension=axis)
# Concrete scans: cumulative sum (monoid +, identity 0) and product (*, 1).
_cumsum_prefix_scan = partial(_parallel_prefix_scan, op=add, unit=0)
_cumprod_prefix_scan = partial(_parallel_prefix_scan, op=mul, unit=1)
def _cumred_shape_rule(x, *, axis: int):
if axis < 0 or axis >= x.ndim:
raise ValueError(
"axis {} is out of bounds for array of shape {}".format(axis, x.shape))
return x.shape
def _cumsum_transpose_rule(t, *, axis: int):
  """Transpose of cumsum: reverse, cumsum, reverse (cumsum is linear)."""
  return [rev(cumsum(rev(t, (axis,)), axis=axis), (axis,))]
def _cumprod_jvp_rule(primals, tangents, *, axis: int):
  """JVP for cumprod: differentiate through the prefix-scan implementation."""
  return api.jvp(partial(_cumprod_prefix_scan, axis=axis), primals, tangents)
def _cumred_tpu_translation_rule(window_reduce: Callable, unit, x, *,
                                 axis: int):
  """TPU lowering for cumulative reductions.

  Implements the scan as a reduce_window with a full-length window, padding
  the scanned axis on the left with n-1 identity elements. On TPU, which
  lacks a scan instruction, the O(n^2) window reduction is the intended
  trade-off.
  """
  n = x.shape[axis]
  if n == 0:
    return x
  padding = [(0, 0, 0)] * x.ndim
  padding[axis] = (n - 1, 0, 0)
  x = pad(x, _const(x, unit), padding)
  strides = [1] * x.ndim
  window_dims = [1] * x.ndim
  window_dims[axis] = n
  return window_reduce(x, window_dims, strides, xla_client.PaddingType.VALID)
def _cumred_batch_rule(prim, batched_args, batch_dims, *, axis: int):
operand, = batched_args
bdim, = batch_dims
axis = axis if axis < bdim else axis + 1
return prim.bind(operand, axis=axis), bdim
# Primitive registrations: cumsum / cumprod. The generic lowering uses the
# parallel prefix scan; TPU gets the reduce_window-based lowering instead.
cumsum_p = standard_primitive(
  _cumred_shape_rule, partial(_reduce_number_dtype_rule, "cumsum"),
  'cumsum', xla.lower_fun(_cumsum_prefix_scan, multiple_results=False))
ad.deflinear(cumsum_p, _cumsum_transpose_rule)
xla.backend_specific_translations['tpu'][cumsum_p] = xla.lower_fun(
  partial(_cumred_tpu_translation_rule, _reduce_window_sum, 0),
  multiple_results=False)
batching.primitive_batchers[cumsum_p] = partial(_cumred_batch_rule, cumsum_p)

cumprod_p = standard_primitive(
  _cumred_shape_rule, partial(_reduce_number_dtype_rule, "cumprod"),
  'cumprod', xla.lower_fun(_cumprod_prefix_scan, multiple_results=False))
ad.primitive_jvps[cumprod_p] = _cumprod_jvp_rule
xla.backend_specific_translations['tpu'][cumprod_p] = xla.lower_fun(
  partial(_cumred_tpu_translation_rule, _reduce_window_prod, 1),
  multiple_results=False)
batching.primitive_batchers[cumprod_p] = partial(_cumred_batch_rule, cumprod_p)
def _sort_abstract_eval(*args, **kwargs):
  """Abstract eval for sort: all operands must share a shape; shapes pass
  through unchanged."""
  args = tuple(raise_to_shaped(arg) for arg in args)
  if any(arg.shape != args[0].shape for arg in args[1:]):
    shapes = " ".join(str(a.shape) for a in args)
    raise TypeError(f"Arguments to sort must have equal shapes, got: {shapes}")
  return args
def _float_to_int_for_sort(x):
  """Bitcast floats to ints whose signed comparison matches float ordering.

  Positive floats compare correctly as signed ints directly; negative floats
  have reversed order, so their bit patterns are flipped (max - unsigned)
  before comparison. This gives a total order that also handles -0.0/NaN bit
  patterns consistently.
  """
  # In order to sort bfloat16 values accurately, we upcast to float32
  # to avoid bfloat16 comparison issues on some backends.
  if x.dtype == dtypes.bfloat16:
    x = convert_element_type(x, onp.float32)
  nbits = onp.finfo(x).bits
  signed_dtype = _INT_DTYPES[nbits]
  unsigned_dtype = _UINT_DTYPES[nbits]

  signed = bitcast_convert_type(x, signed_dtype)
  unsigned = bitcast_convert_type(x, unsigned_dtype)
  flipped = bitcast_convert_type(
    sub(unsigned_dtype(onp.iinfo(signed_dtype).max), unsigned), signed_dtype)
  return select(lt(signed, _zero(signed)), flipped, signed)
# This code adds complex-number support to the algorithm from:
# https://github.com/tensorflow/tensorflow/blob/ba43780830f09da72081fe5061c436f1c6203a92/tensorflow/compiler/xla/client/lib/comparators.h#L33
def _sort_lt_comparator(*operands):
  """Lexicographic less-than over interleaved (x, y) operand pairs.

  Only the first pair determines the sort key; floats (and the real/imag
  parts of complex values) are mapped to total-order integers first.
  """
  assert len(operands) >= 2 and len(operands) % 2 == 0, operands
  x, y = operands[:2]
  assert x.dtype == y.dtype, (x.dtype, y.dtype)
  if onp.issubdtype(x.dtype, onp.complexfloating):
    # Complex values sort lexicographically by (real, imag).
    x_keys = [_float_to_int_for_sort(real(x)), _float_to_int_for_sort(imag(x))]
    y_keys = [_float_to_int_for_sort(real(y)), _float_to_int_for_sort(imag(y))]
  elif onp.issubdtype(x.dtype, onp.floating):
    x_keys = [_float_to_int_for_sort(x)]
    y_keys = [_float_to_int_for_sort(y)]
  else:
    x_keys = [x]
    y_keys = [y]

  # Fold the keys from least to most significant into one predicate.
  p = None
  for xk, yk in zip(x_keys[::-1], y_keys[::-1]):
    p = (bitwise_or(lt(xk, yk), bitwise_and(eq(xk, yk), p)) if p is not None
         else lt(xk, yk))
  return p
def _sort_translation_rule(c, *operands, dimension):
  """XLA translation for sort: build the comparator subcomputation and emit a
  stable variadic Sort; a single result is still wrapped in a tuple."""
  types = [c.get_shape(x).xla_element_type() for x in operands]
  subc = xla_bridge.make_computation_builder("sort_lt_comparator")
  params = [xb.parameter(subc, 2 * i + j, xc.Shape.array_shape(typ, ()))
            for i, typ in enumerate(types) for j in range(2)]
  result = xla.lower_fun(_sort_lt_comparator,
                         multiple_results=False)(subc, *params)
  comparator = subc.build(result)
  out = xops.Sort(c, operands, dimension=dimension, is_stable=True,
                  comparator=comparator)
  return out if len(operands) != 1 else xops.Tuple(c, [out])
def _sort_jvp(primals, tangents, *, dimension):
  """JVP for sort: co-sort an iota to recover the permutation, then apply the
  same permutation to the tangents by fancy indexing."""
  shape = primals[0].shape
  iotas = []
  for dim, size in enumerate(shape):
    # int32 indices unless the dimension is too large to address with them.
    dtype = onp.int32 if size < onp.iinfo(onp.int32).max else onp.int64
    iotas.append(broadcasted_iota(dtype, shape, dim))
  primals = sort_p.bind(*(primals + (iotas[dimension],)), dimension=dimension)
  idx = tuple(primals[-1] if i == dimension else iotas[i]
              for i in range(len(shape)))
  tangents_out = tuple(ad_util.zero if t is ad_util.zero else t[idx]
                       for t in tangents)
  return tuple(primals[:-1]), tangents_out
def _sort_batch_rule(batched_args, batch_dims, *, dimension):
  """Batching rule for sort: broadcast unbatched operands and align all batch
  dims to one position, shifting `dimension` past the batch axis if needed."""
  prototype_arg, new_bdim = next(
    (a, b) for a, b in zip(batched_args, batch_dims) if b is not None)
  new_args = []
  for arg, bdim in zip(batched_args, batch_dims):
    if bdim is None:
      # Broadcast unbatched argument to match the batched prototype.
      dims = onp.delete(onp.arange(prototype_arg.ndim), new_bdim)
      new_args.append(broadcast_in_dim(arg, prototype_arg.shape, dims))
    else:
      new_args.append(batching.moveaxis(arg, bdim, new_bdim))
  new_dimension = dimension + (new_bdim <= dimension)
  bdims = (new_bdim,) * len(new_args)
  return sort_p.bind(*new_args, dimension=new_dimension), bdims
# Primitive registration: variadic, stable sort.
sort_p = Primitive('sort')
sort_p.multiple_results = True
sort_p.def_impl(partial(xla.apply_primitive, sort_p))
sort_p.def_abstract_eval(_sort_abstract_eval)
xla.translations[sort_p] = _sort_translation_rule
ad.primitive_jvps[sort_p] = _sort_jvp
batching.primitive_batchers[sort_p] = _sort_batch_rule
def _top_k_abstract_eval(operand, *, k):
  """Abstract eval for top_k: returns (values, int32 indices), both with the
  operand's shape except the last dimension replaced by k."""
  if k < 0:
    raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
  if len(operand.shape) == 0:
    raise TypeError("top_k operand must have >= 1 dimension, got {}"
                    .format(operand.shape))
  shape = list(operand.shape)
  if shape[-1] < k:
    msg = "k argument to top_k must be no larger than minor dimension; {} vs {}"
    raise ValueError(msg.format(k, shape))
  shape[-1] = k
  return (ShapedArray(shape, operand.dtype),
          ShapedArray(shape, onp.dtype(onp.int32)))
def _top_k_jvp(primals, tangents, *, k):
  """JVP for top_k: gather the tangent entries of the selected elements.

  Builds full gather indices by pairing iotas over the leading dimensions
  with the top-k indices in the last dimension; index tangents are zero.
  """
  operand, = primals
  tangent, = tangents
  primals_out = top_k(operand, k)
  if tangent is ad_util.zero:
    tangents_out = (ad_util.zero, ad_util.zero)
  else:
    _, k_idxs = primals_out
    idx_shape = k_idxs.shape
    rank = len(idx_shape)
    gather_index_shape = idx_shape + (1,)
    gather_indices = []
    for i in range(rank-1):
      _iota = iota(k_idxs.dtype, idx_shape[i])
      _iota = tie_in(operand, _iota)
      _iota = broadcast_in_dim(_iota, gather_index_shape, (i,))
      gather_indices.append(_iota)
    gather_indices.append(reshape(k_idxs, gather_index_shape))
    gather_indices = concatenate(gather_indices, dimension=rank)
    slice_sizes = (1,) * rank
    dnums = GatherDimensionNumbers(
      offset_dims=(),
      collapsed_slice_dims=tuple(range(rank)),
      start_index_map=tuple(range(rank)))
    tangents_out = (gather(tangent, gather_indices, dnums, slice_sizes),
                    ad_util.zero)
  return primals_out, tangents_out
def _top_k_batch_rule(batched_args, batch_dims, *, k):
  """Batching rule for top_k.

  top_k always reduces the last dimension, so if the batch dim is last, swap
  it with the second-to-last dimension, apply top_k, and swap back.
  """
  operand, = batched_args
  bdim, = batch_dims
  if bdim == operand.ndim-1:
    perm = onp.arange(operand.ndim)
    perm[bdim-1], perm[bdim] = perm[bdim], perm[bdim-1]
    top_k_v, top_k_i = top_k(transpose(operand, perm), k=k)
    return (transpose(top_k_v, perm),
            transpose(top_k_i, perm)), (bdim, bdim)
  else:
    return top_k(operand, k=k), (bdim, bdim)
# Primitive registration: top_k (two results: values and indices).
top_k_p = Primitive('top_k')
top_k_p.multiple_results = True
top_k_p.def_impl(partial(xla.apply_primitive, top_k_p))
top_k_p.def_abstract_eval(_top_k_abstract_eval)
xla.translations[top_k_p] = partial(standard_translate, 'top_k')
ad.primitive_jvps[top_k_p] = _top_k_jvp
batching.primitive_batchers[top_k_p] = _top_k_batch_rule
def _tie_in_transpose_rule(t):
  """Transpose of tie_in: no cotangent flows to the dummy first operand."""
  return [ad_util.zero, t]
def _tie_in_batch_rule(batched_args, batch_dims):
  """Batching rule for tie_in: the result carries the second operand's batch dim."""
  y = tie_in(*batched_args)
  _, bdim_y = batch_dims
  return y, bdim_y
# Primitive registration: tie_in is the identity on its second operand; the
# first operand only threads a data dependence through tracing.
tie_in_p = Primitive('tie_in')
tie_in_p.def_impl(lambda x, y: y)
tie_in_p.def_abstract_eval(lambda x, y: raise_to_shaped(y))
xla.translations[tie_in_p] = lambda c, x, y: y
ad.deflinear(tie_in_p, _tie_in_transpose_rule)
batching.primitive_batchers[tie_in_p] = _tie_in_batch_rule
masking.masking_rules[tie_in_p] = lambda vals, logical_shapes: vals[1]
def _stop_gradient_jvp_rule(primals, tangents):
  """JVP for stop_gradient: primal passes through, tangent is zeroed."""
  # if we don't call stop_gradient here, we'd only peel off one autodiff tracer
  x, = primals
  return stop_gradient(x), ad_util.zero
def _stop_gradient_batch_rule(batched_args, batch_dims):
  """Batching rule for stop_gradient: apply elementwise, keep the batch dim."""
  x, = batched_args
  dim, = batch_dims
  return stop_gradient(x), dim
# stop_gradient is a no-op at the XLA level; only autodiff treats it specially.
xla.translations[ad_util.stop_gradient_p] = lambda c, x: x
ad.primitive_jvps[ad_util.stop_gradient_p] = _stop_gradient_jvp_rule
batching.primitive_batchers[ad_util.stop_gradient_p] = _stop_gradient_batch_rule
def create_token(x):
  """Create a fresh XLA token for sequencing effectful ops.

  x is a dummy argument used to tie the operator into a trace.
  """
  return create_token_p.bind(x)
# Primitive registration: create_token.
create_token_p = Primitive("create_token")
create_token_p.def_impl(partial(xla.apply_primitive, create_token_p))
create_token_p.def_abstract_eval(lambda _: abstract_token)
xla.translations[create_token_p] = lambda c, _: xops.CreateToken(c)
def after_all(*operands):
  """Merge one or more XLA tokens into a single token."""
  return after_all_p.bind(*operands)
def _after_all_abstract_eval(*operands):
  """Abstract eval for after_all: all inputs must be tokens; returns a token."""
  if any(x is not abstract_token for x in operands):
    raise TypeError("Arguments to after_all must be tokens")
  return abstract_token
def _after_all_translation_rule(c, *operands):
  """XLA translation for after_all."""
  return xops.AfterAll(c, operands)
# Primitive registration: after_all.
after_all_p = Primitive("after_all")
after_all_p.def_impl(partial(xla.apply_primitive, after_all_p))
after_all_p.def_abstract_eval(_after_all_abstract_eval)
xla.translations[after_all_p] = _after_all_translation_rule
def infeed(token, shape=None):
  """Consume values from the host via XLA infeed.

  `shape` is a pytree of ShapedArray leaves describing what to receive;
  returns (values pytree, new token).
  """
  flat_shapes, treedef = pytree.flatten(shape)
  # Local name deliberately differs from the `shape` parameter to avoid
  # shadowing it inside the loop.
  for aval in flat_shapes:
    if not isinstance(aval, ShapedArray):
      raise TypeError("shape argument to infeed must be a pytree of "
                      "ShapedArray values, got {}".format(aval))
  xs_and_token = infeed_p.bind(token, shapes=tuple(flat_shapes))
  return treedef.unflatten(xs_and_token[:-1]), xs_and_token[-1]
def _infeed_abstract_eval(token, *, shapes):
  """Abstract eval for infeed: the requested shapes plus a trailing token."""
  if token is not abstract_token:
    raise TypeError("First argument to infeed must be a token")
  return shapes + (abstract_token,)
def _infeed_translation_rule(c, token, *, shapes):
  """XLA translation for infeed: unpack the (values, token) tuple that
  InfeedWithToken produces into a flat result tuple ending with the token."""
  shape = tuple(xla.aval_to_xla_shape(x).with_major_to_minor_layout_if_absent()
                for x in shapes)
  xs_and_token = xops.InfeedWithToken(token,
                                      xla_client.Shape.tuple_shape(shape))
  xs = xops.GetTupleElement(xs_and_token, 0)
  token = xops.GetTupleElement(xs_and_token, 1)
  outs = [xops.GetTupleElement(xs, i) for i in range(len(shapes))] + [token]
  return xops.Tuple(c, outs)
# Primitive registration: infeed.
infeed_p = Primitive("infeed")
infeed_p.multiple_results = True
infeed_p.def_impl(partial(xla.apply_primitive, infeed_p))
infeed_p.def_abstract_eval(_infeed_abstract_eval)
xla.translations[infeed_p] = _infeed_translation_rule
def outfeed(token, xs):
  """Send the pytree `xs` to the host via XLA outfeed; returns a new token."""
  flat_xs, _ = pytree.flatten(xs)
  return outfeed_p.bind(token, *flat_xs)
def _outfeed_abstract_eval(token, *xs):
  """Abstract eval for outfeed: returns only a token."""
  if token is not abstract_token:
    raise TypeError("First argument to outfeed must be a token")
  return abstract_token
def _outfeed_translation_rule(c, token, *xs):
  """XLA translation for outfeed: tuple up the operands and emit them."""
  t = xops.Tuple(c, xs)
  return xops.OutfeedWithToken(t, token, c.get_shape(t))
# Primitive registration: outfeed.
outfeed_p = Primitive("outfeed")
outfeed_p.def_impl(partial(xla.apply_primitive, outfeed_p))
outfeed_p.def_abstract_eval(_outfeed_abstract_eval)
xla.translations[outfeed_p] = _outfeed_translation_rule
def rng_uniform(a, b, shape):
  """Stateful uniform RNG in [a, b) with the given shape (XLA RngUniform)."""
  return rng_uniform_p.bind(a, b, shape=tuple(shape))
def _rng_uniform_abstract_eval(a, b, *, shape):
  """Abstract eval for rng_uniform: scalar bounds with matching dtypes."""
  if a.dtype != b.dtype:
    raise ValueError(
      "Arguments to rng_uniform must have identical dtypes, got {} "
      "and {}.".format(a.dtype, b.dtype))
  if a.shape != () or b.shape != ():
    raise ValueError(
      "Arguments to rng_uniform must be scalars; got shapes {} and {}."
      .format(a.shape, b.shape))
  return ShapedArray(shape, a.dtype)
def _rng_uniform_translation_rule(c, a, b, *, shape):
  """XLA translation for rng_uniform."""
  xla_shape = xc.Shape.array_shape(c.get_shape(a).xla_element_type(), shape)
  return xops.RngUniform(a, b, xla_shape)
# Primitive registration: rng_uniform (no autodiff/batching rules: it is a
# stateful, nondifferentiable operation).
rng_uniform_p = Primitive("rng_uniform")
rng_uniform_p.def_impl(partial(xla.apply_primitive, rng_uniform_p))
rng_uniform_p.def_abstract_eval(_rng_uniform_abstract_eval)
xla.translations[rng_uniform_p] = _rng_uniform_translation_rule
### util

# Short alias used by the shape utilities below.
_ndim = onp.ndim
def _dilate_shape(shape, dilation):
if not onp.all(onp.greater(dilation, 0)):
msg = "All dilations must be positive, got {}."
raise TypeError(msg.format(dilation))
dilation = (1,) * (len(shape) - len(dilation)) + tuple(dilation)
return onp.where(shape == 0, 0,
onp.multiply(dilation, onp.subtract(shape, 1)) + 1)
def _ceil_divide(x1, x2):
return -onp.floor_divide(onp.negative(x1), x2)
def padtype_to_pads(in_shape, window_shape, window_strides, padding):
  """Convert a padding spec ('SAME'/'VALID' string or PaddingType) into a list
  of explicit (low, high) pad amounts per spatial dimension."""
  PaddingType = xla_client.PaddingType

  if isinstance(padding, str):
    mapping = {'VALID': PaddingType.VALID, 'SAME': PaddingType.SAME}
    try:
      padding = mapping[padding.upper()]
    except KeyError as err:
      msg = "Unrecognized padding type: expected 'VALID' or 'SAME', got {}."
      raise RuntimeError(msg.format(padding)) from err

  if padding == PaddingType.SAME:
    # SAME: pad so the output size is ceil(in / stride); any odd padding
    # amount puts the extra element on the high side.
    out_shape = _ceil_divide(in_shape, window_strides)
    pad_sizes = onp.maximum(0, (out_shape - 1) * window_strides +
                                window_shape - in_shape)
    return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes]
  elif padding == PaddingType.VALID:
    return [(0, 0)] * len(in_shape)
  else:
    msg = "Unknown padding type: {}."
    raise TypeError(msg.format(padding))
def _check_same_dtypes(name, ignore_fp_precision, *ttypes):
  """Raise TypeError unless all given dtypes canonicalize to the same dtype."""
  # the `ignore_fp_precision` flag exists because the XLA shape inference logic
  # allows mixed floating point precision, but the HLO verifier often rejects it
  types = list(map(onp.dtype, ttypes))  # canonicalize
  if ignore_fp_precision:
    # Collapse all float (resp. complex) dtypes to one representative so only
    # the kind, not the precision, is compared.
    types = [
      onp.floating if dtypes.issubdtype(dtype, onp.floating)
      else onp.complexfloating if dtypes.issubdtype(dtype, onp.complexfloating)
      else dtype for dtype in types]
  if len({dtypes.canonicalize_dtype(t) for t in types}) != 1:
    if ignore_fp_precision:
      msg = ("{} requires arguments to have same dtypes up to floating point "
             "precision, got {}.")
    else:
      msg = "{} requires arguments to have the same dtypes, got {}."
    raise TypeError(msg.format(name, ", ".join(map(str, types))))
def _check_conv_shapes(name, lhs_shape, rhs_shape, window_strides):
  """Validate convolution operand shapes and strides.

  Assumes NCHW-style layouts: dim 0 is batch/output-feature, dim 1 is the
  feature dimension, and the remaining dims are spatial.
  """
  if len(lhs_shape) != len(rhs_shape):
    msg = "Arguments to {} must have same rank, got {} and {}."
    raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
  if len(lhs_shape) < 2:
    msg = "Arguments to {} must have rank at least 2, got {} and {}."
    raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
  if lhs_shape[1] != rhs_shape[1]:
    msg = "Arguments to {} must agree on input feature size, got {} and {}."
    raise TypeError(msg.format(name, lhs_shape[1], rhs_shape[1]))
  _check_shapelike(name, "window_strides", window_strides)
  if not onp.all(onp.greater(window_strides, 0)):
    msg = "All elements of window_strides must be positive, got {}."
    raise TypeError(msg.format(window_strides))
  if len(window_strides) != len(lhs_shape) - 2:
    msg = "{} window_strides has wrong length: expected {}, got {}."
    expected_length = len(lhs_shape) - 2
    raise TypeError(msg.format(name, expected_length, len(window_strides)))
def conv_shape_tuple(lhs_shape, rhs_shape, strides, pads, batch_group_count=1):
if isinstance(pads, str):
pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)
if len(pads) != len(lhs_shape) - 2:
msg = "Wrong number of explicit pads for convolution: expected {}, got {}."
raise TypeError(msg.format(len(lhs_shape) - 2, len(pads)))
lhs_padded = onp.add(lhs_shape[2:], onp.sum(onp.array(pads).reshape(-1, 2),
axis=1))
out_space = onp.floor_divide(
onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1
out_space = onp.maximum(0, out_space)
assert lhs_shape[0] % batch_group_count == 0
out_shape = (lhs_shape[0] // batch_group_count, rhs_shape[0])
return tuple(out_shape + tuple(out_space))
def conv_general_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
                             dimension_numbers):
  """Compute a conv output shape for arbitrary dimension_numbers by permuting
  to the canonical layout, computing the shape, and permuting back."""
  lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
  lhs_trans = onp.take(lhs_shape, lhs_perm)
  rhs_trans = onp.take(rhs_shape, rhs_perm)
  out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding)
  return tuple(onp.take(out_trans, onp.argsort(out_perm)))
def conv_transpose_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
                               dimension_numbers):
  """Compute the output shape of a transposed convolution.

  Shapes are permuted into canonical (batch, feature, *spatial) order via
  ``dimension_numbers`` before the spatial arithmetic, then permuted back
  to the requested output layout.
  """
  lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
  lhs_trans = onp.take(lhs_shape, lhs_perm)
  rhs_trans = onp.take(rhs_shape, rhs_perm)
  if isinstance(padding, str):
    # Named padding is resolved to explicit (lo, hi) pairs per spatial dim.
    padding = [_conv_transpose_padding(k, s, padding)
               for k,s in zip(rhs_trans[2:], window_strides)]
  # Collapse each (lo, hi) pair to its total padding per spatial dim.
  padding = list(map(onp.sum, padding))
  # Output spatial extent before padding: (in - 1) * stride - kernel + 2.
  unpad_out_space = [(i-1) * s - k + 2
                     for i, k, s in zip(lhs_trans[2:],
                                        rhs_trans[2:],
                                        window_strides)]
  out_space = onp.sum([unpad_out_space, padding], axis=0).tolist()
  # Output keeps the input's batch dim and takes the kernel's leading dim
  # as the feature count.
  out_trans = tuple((lhs_trans[0], rhs_trans[0]) + tuple(out_space))
  return tuple(onp.take(out_trans, onp.argsort(out_perm)))
def _check_shapelike(fun_name, arg_name, obj):
  """Check that ``obj`` is a valid shape: a rank-1 tuple/list/ndarray of
  nonnegative integers. An empty sequence is accepted as the scalar shape.

  Args:
    fun_name: name of the calling function, used in error messages.
    arg_name: name of the argument being checked, used in error messages.
    obj: the candidate shape value.

  Raises:
    TypeError: if ``obj`` is not shape-like, naming ``fun_name``/``arg_name``.
  """
  if not isinstance(obj, (tuple, list, onp.ndarray)):
    msg = "{} {} must be of type tuple/list/ndarray, got {}."
    raise TypeError(msg.format(fun_name, arg_name, type(obj)))
  # bool(obj) for an ndarray raises an error, so we check len
  if not len(obj):  # pylint: disable=g-explicit-length-test
    return
  obj_arr = onp.array(obj)
  if obj_arr.ndim != 1:
    msg = "{} {} must be rank 1, got {}."
    # BUG FIX: this previously formatted a 3-placeholder message with only
    # obj_arr.ndim, which raised IndexError instead of the intended TypeError.
    raise TypeError(msg.format(fun_name, arg_name, obj_arr.ndim))
  try:
    canonicalize_shape(obj_arr)  # defined elsewhere in this module
  except TypeError:
    msg = "{} {} must have every element be an integer type, got {}."
    raise TypeError(msg.format(fun_name, arg_name, tuple(map(type, obj))))
  if not (obj_arr >= 0).all():
    msg = "{} {} must have every element be nonnegative, got {}."
    raise TypeError(msg.format(fun_name, arg_name, obj))
def _dynamic_slice_indices(operand, start_indices):
  """Normalize dynamic-slice start indices to one scalar per dimension.

  Accepts either a single rank-1 index array or a sequence of scalars;
  negative indices are wrapped by the corresponding dimension size.
  """
  if not isinstance(start_indices, (tuple, list)):
    if start_indices.ndim != 1:
      raise ValueError("Slice indices must be a 1D sequence, got {}"
                       .format(start_indices.shape))
    # Split the rank-1 index array into one scalar per operand dimension.
    # NOTE: ``slice`` here is the module's slice operator (shadows the
    # Python builtin).
    start_indices = [reshape(slice(start_indices, [i], [i+1]), ())
                     for i in range(operand.ndim)]
  else:
    # Promote raw Python ints to arrays of the default integer dtype.
    start_indices = [onp.asarray(i, dtype=dtypes.int_) if isinstance(i, int)
                     else i for i in start_indices]
  if len(start_indices) != operand.ndim:
    msg = ("Length of slice indices must match number of operand dimensions ({} "
           "vs {})")
    raise ValueError(msg.format(len(start_indices), operand.shape))
  # map int over operand.shape to raise any dynamic-shape errors
  return [select(lt(i, _const(i, 0)), add(i, _const(i, int(d))), i)
          for i, d in zip(start_indices, operand.shape)]
def _const(example, val):
  """Return ``val`` as a constant matching ``example``'s dtype: a Python
  scalar of the same type for scalar examples, else a numpy array."""
  if dtypes.is_python_scalar(example):
    return dtypes.scalar_type_of(example)(val)
  return onp.array(val, _dtype(example))
# Convenience constructors for constant arrays that match another value's
# dtype: full arrays of 0/1/2 and their scalar (shape ()) counterparts.
_zeros: Callable = partial(full_like, fill_value=0)
_zero: Callable = partial(full_like, shape=(), fill_value=0)
_ones: Callable = partial(full_like, fill_value=1)
_one: Callable = partial(full_like, shape=(), fill_value=1)
_twos: Callable = partial(full_like, fill_value=2)
_two: Callable = partial(full_like, shape=(), fill_value=2)
# Public and private aliases for result-dtype inference over arguments.
dtype: Callable = dtypes.result_type
_dtype: Callable = dtypes.result_type
def _iscomplex(x) -> bool:
  """True if ``x`` has a complex floating dtype."""
  return dtypes.issubdtype(_dtype(x), onp.complexfloating)
def ranges_like(*xs):
  """Yield consecutive ``range`` objects, one per argument, each sized by
  that argument's ``len``, tiling ``range(sum(map(len, xs)))``."""
  offset = 0
  for seq in xs:
    end = offset + len(seq)
    yield range(offset, end)
    offset = end
def remaining(original, *removed_lists):
  """Return the elements of ``original`` not present in any of
  ``removed_lists``, preserving their original order."""
  removed = set(itertools.chain.from_iterable(removed_lists))
  return [elt for elt in original if elt not in removed]
def _canonicalize_precision(precision):
  """Validate and pass through a precision setting (None or lax.Precision)."""
  if precision is None or isinstance(precision, Precision):
    return precision
  msg = "Precision argument must be None or a lax.Precision value; got {}"
  raise ValueError(msg.format(precision))
def conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers):
  """Normalize convolution ``dimension_numbers`` to ConvDimensionNumbers.

  Args:
    lhs_shape: shape tuple of the convolution input; only its length is used.
    rhs_shape: shape tuple of the convolution kernel; must match lhs ndim.
    dimension_numbers: None (identity layout), an existing
      ConvDimensionNumbers (returned unchanged), or a length-3 list/tuple of
      layout strings (lhs, rhs, out), each of length ndim.

  Returns:
    A ConvDimensionNumbers namedtuple of index permutations.

  Raises:
    TypeError: on mismatched ndims or malformed ``dimension_numbers``.
  """
  if isinstance(dimension_numbers, ConvDimensionNumbers):
    return dimension_numbers
  if len(lhs_shape) != len(rhs_shape):
    msg = "convolution requires lhs and rhs ndim to be equal, got {} and {}."
    raise TypeError(msg.format(len(lhs_shape), len(rhs_shape)))
  if dimension_numbers is None:
    # No layout given: treat every axis as already in canonical order.
    iota = tuple(range(len(lhs_shape)))
    return ConvDimensionNumbers(iota, iota, iota)
  elif isinstance(dimension_numbers, (list, tuple)):
    if len(dimension_numbers) != 3:
      msg = "convolution dimension_numbers list/tuple must be length 3, got {}."
      raise TypeError(msg.format(len(dimension_numbers)))
    if not all(isinstance(elt, str) for elt in dimension_numbers):
      msg = "convolution dimension_numbers elements must be strings, got {}."
      raise TypeError(msg.format(tuple(map(type, dimension_numbers))))
    msg = ("convolution dimension_numbers[{}] must have len equal to the ndim "
           "of lhs and rhs, got {} for lhs and rhs shapes {} and {}.")
    for i, elt in enumerate(dimension_numbers):
      if len(elt) != len(lhs_shape):
        raise TypeError(msg.format(i, len(elt), lhs_shape, rhs_shape))
    lhs_spec, rhs_spec, out_spec = conv_general_permutations(dimension_numbers)
    return ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)
  else:
    msg = "convolution dimension_numbers must be tuple/list or None, got {}."
    raise TypeError(msg.format(type(dimension_numbers)))
def conv_general_permutations(dimension_numbers):
  """Return (lhs, rhs, out) index permutations that map each layout string
  to canonical order: batch/output-feature first, then input-feature, then
  spatial axes (ordered as they appear in the rhs spec)."""
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  charpairs = (("N", "C"), ("O", "I"), ("N", "C"))
  # Validate each spec: its two marker characters occur exactly once and no
  # character is repeated.
  for i, (a, b) in enumerate(charpairs):
    spec = dimension_numbers[i]
    if spec.count(a) != 1 or spec.count(b) != 1:
      msg = ("convolution dimension_numbers[{}] must contain the characters "
             "'{}' and '{}' exactly once, got {}.")
      raise TypeError(msg.format(i, a, b, spec))
    if len(set(spec)) != len(spec):
      msg = ("convolution dimension_numbers[{}] cannot have duplicate "
             "characters, got {}.")
      raise TypeError(msg.format(i, spec))
  lhs_char, rhs_char, out_char = charpairs
  if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==
          set(out_spec) - set(out_char)):
    msg = ("convolution dimension_numbers elements must each have the same "
           "set of spatial characters, got {}.")
    raise TypeError(msg.format(dimension_numbers))
  def getperm(spec, charpair):
    # Spatial axes, reordered (except for the rhs spec itself) to match the
    # order in which they appear in the rhs spec.
    spatial = [i for i, c in enumerate(spec) if c not in charpair]
    if spec is not rhs_spec:
      spatial.sort(key=lambda i: rhs_spec.index(spec[i]))
    return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)
  return tuple(getperm(s, p) for s, p in zip(dimension_numbers, charpairs))
def _conv_general_proto(dimension_numbers):
  """Translate a ConvDimensionNumbers namedtuple into the XLA
  ConvolutionDimensionNumbers protobuf: positions 0/1 of each spec are the
  batch/feature (or output/input feature) dims, the rest are spatial."""
  assert type(dimension_numbers) is ConvDimensionNumbers
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  proto = xla_client.ConvolutionDimensionNumbers()
  proto.input_batch_dimension = lhs_spec[0]
  proto.input_feature_dimension = lhs_spec[1]
  proto.output_batch_dimension = out_spec[0]
  proto.output_feature_dimension = out_spec[1]
  proto.kernel_output_feature_dimension = rhs_spec[0]
  proto.kernel_input_feature_dimension = rhs_spec[1]
  proto.input_spatial_dimensions.extend(lhs_spec[2:])
  proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
  proto.output_spatial_dimensions.extend(out_spec[2:])
  return proto
def _conv_general_vjp_lhs_padding(
    in_shape, window_dimensions, window_strides, out_shape, padding,
    lhs_dilation, rhs_dilation):
  """Padding for the transposed conv computing the lhs (input) gradient.

  All arguments are spatial-only shapes/strides/pads of the forward conv.
  Returns an iterator of (lo, hi) padding pairs, one per spatial dim.
  """
  lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
  rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
  out_dilated_shape = _dilate_shape(out_shape, window_strides)
  pad_before = onp.subtract(rhs_dilated_shape, [lo for lo, _ in padding]) - 1
  # Total per-dim padding is lhs_dil + rhs_dil - 1 - out_dil; whatever is
  # not consumed by pad_before goes after.
  pad_after = (onp.add(lhs_dilated_shape, rhs_dilated_shape) - 1
               - out_dilated_shape - pad_before)
  return zip(pad_before, pad_after)
def _conv_general_vjp_rhs_padding(
    in_shape, window_dimensions, window_strides, out_shape, padding,
    lhs_dilation, rhs_dilation):
  """Padding for the conv computing the rhs (kernel) gradient.

  Keeps each dimension's original lo pad and lets the hi pad absorb the
  remainder of the required total input padding. Returns (lo, hi) pairs.
  """
  lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
  rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
  out_dilated_shape = _dilate_shape(out_shape, window_strides)
  # Total padding needed so the dilated output/window cover the dilated input.
  total_in_pad = out_dilated_shape + rhs_dilated_shape - lhs_dilated_shape - 1
  return [(pad[0], tot - pad[0]) for pad, tot in zip(padding, total_in_pad)]
def _balanced_eq(x, z, y):
  """Elementwise indicator of ``x == z``, halved where ``y == z`` as well:
  yields 1 where only x matches, 1/2 where both match, 0 otherwise.
  NOTE(review): appears intended to split ties between two operands (e.g.
  in min/max VJPs) — confirm at call sites.
  """
  return div(select(_eq_meet(x, z), _ones(z), _zeros(z)),
             select(_eq_meet(y, z), _twos(z), _ones(z)))
def _eq_meet(a, b):
  """Elementwise equality after demoting the higher-dtype operand to the
  other operand's dtype (the 'meet' of the two dtypes)."""
  a_dtype, b_dtype = _dtype(a), _dtype(b)
  if a_dtype != b_dtype:
    higher_dtype = dtypes.promote_types(a_dtype, b_dtype)
    if higher_dtype == a_dtype:
      # a has the higher dtype; convert it down to b's.
      a = convert_element_type(a, b_dtype)
    else:
      b = convert_element_type(b, a_dtype)
  return eq(a, b)
def _abstractify(x):
  """Return the shaped abstract value (aval) corresponding to ``x``."""
  return raise_to_shaped(core.get_aval(x))
def _check_user_dtype_supported(dtype, fun_name=None):
  """Validate a user-supplied dtype request.

  Args:
    dtype: the requested dtype (anything ``onp.dtype`` accepts), or None.
    fun_name: optional caller name used in the truncation warning.

  Raises:
    TypeError: for dtypes that are not number/bool/bfloat16 kinds.
  Warns when the requested dtype will be truncated under the current
  x64 configuration.
  """
  if dtype is None:
    # Nothing was requested, so there is nothing to validate. This also
    # avoids relying on onp.dtype(None) -> float64, which is deprecated.
    return
  onp_dtype = onp.dtype(dtype)
  if onp_dtype.kind not in "biufc" and onp_dtype.type != dtypes.bfloat16:
    msg = f"JAX only supports number and bool dtypes, got dtype {dtype}"
    raise TypeError(msg)
  if onp_dtype != dtypes.canonicalize_dtype(dtype):
    msg = ("Explicitly requested dtype {} {} is not available, "
           "and will be truncated to dtype {}. To enable more dtypes, set the "
           "jax_enable_x64 configuration option or the JAX_ENABLE_X64 shell "
           "environment variable. "
           "See https://github.com/google/jax#current-gotchas for more.")
    fun_name = "requested in {}".format(fun_name) if fun_name else ""
    truncated_dtype = dtypes.canonicalize_dtype(dtype).name
    warnings.warn(msg.format(dtype, fun_name, truncated_dtype))
def _canonicalize_axis(axis, num_dims):
  """Convert ``axis`` to a nonnegative index into ``num_dims`` dimensions,
  accepting negative (from-the-end) values."""
  resolved = int(axis)
  if resolved < 0:
    resolved += num_dims
  if not 0 <= resolved < num_dims:
    raise ValueError(
        "axis {} is out of bounds for array of dimension {}".format(
            resolved, num_dims))
  return resolved
| true | true |
f713f4cb4e88d6989f2ec465138945e7b6d049ed | 231 | py | Python | santa_helpers/__init__.py | lenarother/santa-helpers | 0498b9922b357c98543929a39d9755085da527b0 | [
"MIT"
] | null | null | null | santa_helpers/__init__.py | lenarother/santa-helpers | 0498b9922b357c98543929a39d9755085da527b0 | [
"MIT"
] | null | null | null | santa_helpers/__init__.py | lenarother/santa-helpers | 0498b9922b357c98543929a39d9755085da527b0 | [
"MIT"
] | null | null | null | """Top-level package for santa-helpers."""
__author__ = """Magdalena Rother"""
__email__ = 'rother.magdalena@gmail.com'
__version__ = '0.0.1'
from .neighbors import neighbors # noqa
from .parse import parse_grid_to_dict # noqa
| 25.666667 | 45 | 0.735931 |
__author__ = """Magdalena Rother"""
__email__ = 'rother.magdalena@gmail.com'
__version__ = '0.0.1'
from .neighbors import neighbors
from .parse import parse_grid_to_dict
| true | true |
f713f6e459931ce5f210c3897d1bf4d851edf6bf | 1,194 | py | Python | tests/test_class_oelint_vars_bugtrackerurl.py | vermaete/oelint-adv | 6a2cadf2fd076d6e531d4bb0abcc4ad89d1fdbee | [
"BSD-2-Clause"
] | null | null | null | tests/test_class_oelint_vars_bugtrackerurl.py | vermaete/oelint-adv | 6a2cadf2fd076d6e531d4bb0abcc4ad89d1fdbee | [
"BSD-2-Clause"
] | null | null | null | tests/test_class_oelint_vars_bugtrackerurl.py | vermaete/oelint-adv | 6a2cadf2fd076d6e531d4bb0abcc4ad89d1fdbee | [
"BSD-2-Clause"
] | null | null | null | import os
import sys
import pytest
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from base import TestBaseClass
class TestClassOelintVarsBugtrackerIsUrl(TestBaseClass):
@pytest.mark.parametrize('id', ['oelint.vars.bugtrackerisurl'])
@pytest.mark.parametrize('occurrence', [1])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
BUGTRACKER = "what_/the/f"
'''
},
{
'oelint_adv_test.bb':
'''
BUGTRACKER = "what_/the/f"
'''
},
],
)
    def test_bad(self, input, id, occurrence):
        """A non-URL BUGTRACKER value must be reported exactly once."""
        self.check_for_id(self._create_args(input), id, occurrence)
@pytest.mark.parametrize('id', ['oelint.vars.bugtrackerisurl'])
@pytest.mark.parametrize('occurrence', [0])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
BUGTRACKER = "https://foo.com"
'''
},
],
)
    def test_good(self, input, id, occurrence):
        """A well-formed https BUGTRACKER URL must not be reported."""
        self.check_for_id(self._create_args(input), id, occurrence)
| 24.875 | 67 | 0.536851 | import os
import sys
import pytest
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from base import TestBaseClass
class TestClassOelintVarsBugtrackerIsUrl(TestBaseClass):
@pytest.mark.parametrize('id', ['oelint.vars.bugtrackerisurl'])
@pytest.mark.parametrize('occurrence', [1])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
BUGTRACKER = "what_/the/f"
'''
},
{
'oelint_adv_test.bb':
'''
BUGTRACKER = "what_/the/f"
'''
},
],
)
def test_bad(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
@pytest.mark.parametrize('id', ['oelint.vars.bugtrackerisurl'])
@pytest.mark.parametrize('occurrence', [0])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
BUGTRACKER = "https://foo.com"
'''
},
],
)
def test_good(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
| true | true |
f713f744998c6af0cf3cc3d45319704b6dcf176c | 99 | py | Python | spacetrading/apps.py | claudiobierig/doppeldenk | 770cd5322753450834ec393a0801de1d2de2bfa2 | [
"MIT"
] | 1 | 2020-11-08T12:32:36.000Z | 2020-11-08T12:32:36.000Z | spacetrading/apps.py | claudiobierig/doppeldenk | 770cd5322753450834ec393a0801de1d2de2bfa2 | [
"MIT"
] | 1 | 2021-06-04T22:23:30.000Z | 2021-06-04T22:23:30.000Z | spacetrading/apps.py | claudiobierig/doppeldenk | 770cd5322753450834ec393a0801de1d2de2bfa2 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class SpacetradingConfig(AppConfig):
    """Django application configuration for the spacetrading app."""
    name = 'spacetrading'
| 16.5 | 36 | 0.777778 | from django.apps import AppConfig
class SpacetradingConfig(AppConfig):
name = 'spacetrading'
| true | true |
f713f7962135e6e3e3c9bc85590f2aebb3b24ea0 | 4,956 | py | Python | cookie/cookie.py | ChairsDaily/cookie | 58e294a6f92d061ab96683b7fda13d32daefe3a2 | [
"MIT"
] | null | null | null | cookie/cookie.py | ChairsDaily/cookie | 58e294a6f92d061ab96683b7fda13d32daefe3a2 | [
"MIT"
] | null | null | null | cookie/cookie.py | ChairsDaily/cookie | 58e294a6f92d061ab96683b7fda13d32daefe3a2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
Beautiful command line parsing
@author chairs
"""
import inspect, sys
from collections import namedtuple
from collections import defaultdict
from subprocess import DEVNULL
class Cookie (object):
    """Decorator-based command line parser.

    Wrap a function with ``get_args`` and its positional parameters become
    ordered CLI arguments while keyword parameters become ``--flag`` /
    ``-f`` options, with automatic abbreviation-conflict resolution.

    @param app_name name of the application
    @param notes optional iterable of extra usage notes
    """
    def __init__ (self, app_name, notes=()):
        # record type describing one optional (flagged) argument
        self.optarg = namedtuple('optarg',
            ['full', 'abbrev', 'default'])
        self.name = str(app_name)
        self.notes = notes

    def __parse (self, args):
        """Split raw argv tokens into positional and flagged values.

        @param args command line tokens (program name already stripped)
        @return (ordered, full, abbrev): positional tokens, values keyed by
            long flag name, and values keyed by short flag name
        @raise ValueError when a flag is not followed by a value
        """
        ordered = []
        # BUG FIX: previously ``full = abbrev = dict()`` aliased both names
        # to a single dict, merging long and short flag maps.
        full = {}
        abbrev = {}
        args = args + ['']  # sentinel so next_token is always defined
        i = 0
        while i < len(args) - 1:
            token = args[i]
            next_token = args[i + 1]
            # the full argument case
            if token.startswith('--'):
                if next_token.startswith('-'):
                    raise ValueError('{} incomplete'.format(token))
                full[token[2:]] = next_token
                i += 2
            # the shorthand argument case (more common)
            elif token.startswith('-'):
                if next_token.startswith('-'):
                    raise ValueError('{} incomplete'.format(token))
                abbrev[token[1:]] = next_token
                i += 2
            else:
                ordered.append(token)
                i += 1
        return ordered, full, abbrev

    def __construct_ordered (self, params):
        """Names of parameters without defaults (positional arguments).

        @param params mapping from ``inspect.Signature.parameters``
        @return list of positional parameter names, in declaration order
        """
        return [key for key, arg in params.items() if arg.default == inspect._empty]

    def __construct_optional (self, params):
        """Build optarg records for parameters that have defaults.

        Short flags default to the lower-cased first letter of the name;
        conflicts are resolved by upper-casing, then numbering later clashes.

        @param params mapping from ``inspect.Signature.parameters``
        @return (by_full, by_abbrev): optarg records keyed by long and by
            short flag name
        """
        filtered = {
            key: arg.default for key, arg in params.items() if arg.default != inspect._empty}
        args = [self.optarg(full=key, abbrev=key[0].lower(), default=default)
                for key, default in filtered.items()]
        # BUG FIX: previously ``args_full = args_abbrev = dict()`` aliased a
        # single dict; keep the two lookup tables distinct.
        args_full = {}
        args_abbrev = {}
        known_count = defaultdict(int)
        for arg in args:
            args_full[arg.full] = arg
            if known_count[arg.abbrev] == 0:
                args_abbrev[arg.abbrev] = arg
            else:
                # conflict: upper-case the letter, then number further
                # clashes (also fixes the former ``arg.apprev`` typo, which
                # raised AttributeError on any conflict)
                new_abbrev = arg.abbrev.upper()
                if known_count[arg.abbrev] > 1:
                    new_abbrev += str(known_count[arg.abbrev])
                args_full[arg.full] = self.optarg(
                    full=arg.full,
                    abbrev=new_abbrev,
                    default=arg.default)
                args_abbrev[new_abbrev] = args_full[arg.full]
            known_count[arg.abbrev] += 1
        return args_full, args_abbrev

    def __resolve (self, args, signature):
        """Match parsed argv against a function signature.

        @param args full argv (``args[0]`` is the program name)
        @param signature ``inspect.Signature`` of the target function
        @return (ordered, optional): positional values in order, plus a
            dict of optional values keyed by full parameter name
        @raise Exception on argument-count mismatch or an unknown flag
        """
        ordered, opt_parsed_full, opt_parsed_abbrev = self.__parse(args[1:])
        ordered_def = self.__construct_ordered(signature.parameters)
        if len(ordered) != len(ordered_def):
            raise Exception('wrong number of ordered arguments')
        opt_parsed = {}
        opt_parsed.update(opt_parsed_full)
        opt_parsed.update(opt_parsed_abbrev)
        opt_def_full, opt_def_abbrev = self.__construct_optional(signature.parameters)
        # start from declared defaults, then overlay parsed values
        optional = {o.full: o.default for o in opt_def_full.values()}
        opt_def = {}
        opt_def.update(opt_def_full)
        opt_def.update(opt_def_abbrev)
        for key, value in opt_parsed.items():
            if key not in opt_def:
                raise Exception('resolution error')
            d = opt_def[key]
            optional[d.full] = value
        return ordered, optional

    def __usage_outline (self, signature):
        """Render a usage string: ORDERED args then [-a | --arg ARG] options.

        BUG FIX: the previous comma-split post-processing silently dropped
        every second option from the outline.

        @param signature ``inspect.Signature`` of the target function
        """
        ordered = self.__construct_ordered(signature.parameters)
        full, _ = self.__construct_optional(signature.parameters)
        ordered_str = ' '.join(name.upper() for name in ordered)
        optional_str = ' '.join('\n[-{} | --{} {}]'.format(
            opt.abbrev, opt.full, opt.full.upper()) for opt in full.values())
        return '{} {}'.format(ordered_str, optional_str)

    def get_args (self, function):
        """Decorator: parse ``sys.argv`` and call ``function`` with the result.

        On a parse failure the usage outline (plus any notes) is printed
        instead of raising.
        """
        def wrapper ():
            sig = inspect.signature(function)
            try:
                ordered, optional = self.__resolve(sys.argv, sig)
            except Exception:
                self.outline = ('Usage: ', sys.argv[0], self.__usage_outline(sig,))
                print(*self.outline)
                if not self.notes == ():
                    print('\n'.join(self.notes) + '\n' + '\t' + 'respectively')
                return
            function(*ordered, **optional)
        return wrapper

    def run (self, function_name, silent=False):
        """Invoke a wrapped function, optionally suppressing its stdout.

        @param function_name zero-argument callable (typically a wrapper
            returned by ``get_args``)
        @param silent when True, redirect stdout to the OS null device
        """
        import os  # local import keeps the module's import surface unchanged
        restore = sys.stdout
        sink = None
        if silent:
            # BUG FIX: ``open('/dev/null', 'w').close()`` evaluated to None,
            # so any print inside the callable crashed with AttributeError;
            # keep a real, open, portable sink and close it afterwards.
            sink = open(os.devnull, 'w')
            sys.stdout = sink
        try:
            function_name()
        finally:
            sys.stdout = restore
            if sink is not None:
                sink.close()
| 26.084211 | 84 | 0.682002 |
import inspect, sys
from collections import namedtuple
from collections import defaultdict
from subprocess import DEVNULL
class Cookie (object):
def __init__ (self, app_name, notes=()):
self.optarg = namedtuple('optarg',
['full', 'abbrev', 'default'])
self.name = str(app_name)
self.notes = notes
def __parse (self, args):
ordered = list(); full = abbrev = dict()
args = args + ['']
i = 0
while i < len(args) - 1:
token = args[i]
next_token = args[i + 1]
if token.startswith('--'):
if next_token.startswith('-'):
raise ValueError('{} incomplete'.format(token))
else:
full[token[2:]] = next_token
i += 2
elif token.startswith('-'):
if next_token.startswith('-'):
raise ValueError('{} incomplete'.format(token))
else:
abbrev[token[1:]] = next_token
i += 2
else:
ordered.append(token)
i += 1
return ordered, full, abbrev
def __construct_ordered (self, params):
return [key for key, arg in params.items() if arg.default == inspect._empty]
def __construct_optional (self, params):
args = []
filtered = {
key: arg.default for key, arg in params.items() if arg.default != inspect._empty}
for key, default in filtered.items():
arg = self.optarg(full=key, abbrev=key[0].lower(), default=default)
args.append(arg)
args_full = args_abbrev = dict()
known_count = defaultdict(int)
for arg in args:
args_full[arg.full] = arg
if known_count[arg.abbrev] == 0: args_abbrev[arg.abbrev] = arg
elif known_count[arg.abbrev] == 1:
new_abbrev = arg.apprev.upper()
args_full[arg.full] = self.optarg(
full=arg.full,
abbrev=new_abbrev,
default=arg.default)
args_abbrev[new_abbrev] = args_full[arg.full]
else:
new_abbrev = arg.apprev.upper() + str(known_count[arg.abbrev])
args_full[arg.full] = self.optarg(
full=arg.full,
abbrev=new_abbrev,
default=arg.default)
args_abbrev[new_abbrev] = args_full[arg.full]
known_count[arg.abbrev] += 1
return args_full, args_abbrev
def __resolve (self, args, signature):
ordered, opt_parsed_full, opt_parsed_abbrev = self.__parse(args[1:])
ordered_def = self.__construct_ordered(signature.parameters)
if len(ordered) != len(ordered_def):
raise Exception('wrong number of oredered arguments')
opt_parsed = dict()
opt_parsed.update(opt_parsed_full)
opt_parsed.update(opt_parsed_abbrev)
opt_def_full, opt_def_abbrev = self.__construct_optional(signature.parameters)
optional = {o.full: o.default for o in opt_def_full.values()}
opt_def = dict()
opt_def.update(opt_def_full)
opt_def.update(opt_def_abbrev)
for key, value in opt_parsed.items():
if key not in opt_def: raise Exception('resolution error')
d = opt_def[key]
optional[d.full] = value
return ordered, optional
def __usage_outline (self, signature):
ordered = self.__construct_ordered(signature.parameters)
full, _ = self.__construct_optional(signature.parameters)
ordered_str = ' '.join(name.upper() for name in ordered)
optional_str = ' '.join('\n[-{} | --{} {}],'.format(
opt.abbrev, opt.full, opt.full.upper()) for opt in full.values())
optional_str = ''.join(optional_str.split(',')[::2])
return '{} {}'.format(ordered_str, optional_str)
def get_args (self, function):
def wrapper ():
sig = inspect.signature(function)
try:
ordered, optional = self.__resolve(sys.argv, sig)
except Exception:
self.outline = ('Usage: ', sys.argv[0], self.__usage_outline(sig,))
print(*self.outline)
if not self.notes == ():
print('\n'.join(self.notes) + '\n'+'\t'*1 + 'respectively')
return
function(*ordered, **optional)
return wrapper
def run (self, function_name, silent=False):
restore = sys.stdout
if silent:
sys.stdout = open('/dev/null', 'w').close()
function_name()
sys.stdout = restore
| true | true |
f713f85e83a78de7f23187319432ac1b46bbef92 | 3,117 | py | Python | tests/test_cron.py | SF-300/aiocron | fe1abc1728d6c8ecb7ecbf75f86f1014e823dfec | [
"MIT"
] | 245 | 2015-02-23T22:50:55.000Z | 2022-03-24T10:24:44.000Z | tests/test_cron.py | SF-300/aiocron | fe1abc1728d6c8ecb7ecbf75f86f1014e823dfec | [
"MIT"
] | 21 | 2015-10-21T16:00:29.000Z | 2022-03-28T08:25:46.000Z | tests/test_cron.py | SF-300/aiocron | fe1abc1728d6c8ecb7ecbf75f86f1014e823dfec | [
"MIT"
] | 28 | 2015-10-21T15:41:42.000Z | 2022-03-25T21:36:04.000Z | # -*- coding: utf-8 -*-
import time
import datetime
from aiocron import asyncio
from aiocron import crontab
import pytest
class CustomError(Exception):
    """Distinct exception type used to verify error propagation."""
    pass
def test_str():
    """str() of a crontab job should include its cron spec."""
    loop = asyncio.new_event_loop()
    @crontab('* * * * * *', loop=loop)
    def t():
        pass
    assert '* * * * *' in str(t)
def test_cron():
    """A manually started job runs its callback; stop() completes cleanly."""
    loop = asyncio.new_event_loop()
    future = asyncio.Future(loop=loop)
    @crontab('* * * * * *', start=False, loop=loop)
    def t():
        future.set_result(1)
    t.start()
    loop.run_until_complete(future)
    t.stop()
    assert future.result() == 1
def test_raise():
    """A callback that raises must not break the loop: the result it
    scheduled via call_later before raising still arrives."""
    loop = asyncio.new_event_loop()
    future = asyncio.Future(loop=loop)
    @crontab('* * * * * *', start=False, loop=loop)
    def t():
        loop.call_later(1, future.set_result, 1)
        raise ValueError()
    t.start()
    loop.run_until_complete(future)
    t.stop()
    assert future.result() == 1
def test_next():
    """crontab.next() resolves to the plain callback's return value."""
    loop = asyncio.new_event_loop()
    def t():
        return 1
    t = crontab('* * * * * *', func=t, loop=loop)
    future = asyncio.ensure_future(t.next(), loop=loop)
    loop.run_until_complete(future)
    assert future.result() == 1
def test_null_callback():
    """With no callback, next(*args) echoes the args back as a tuple."""
    loop = asyncio.new_event_loop()
    t = crontab('* * * * * *', loop=loop)
    assert t.handle is None  # not started
    future = asyncio.ensure_future(t.next(4), loop=loop)
    loop.run_until_complete(future)
    assert future.result() == (4,)
def test_next_raise():
    """next() propagates an exception raised by the callback."""
    loop = asyncio.new_event_loop()
    @crontab('* * * * * *', loop=loop)
    def t():
        raise CustomError()
    future = asyncio.ensure_future(t.next(), loop=loop)
    with pytest.raises(CustomError):
        loop.run_until_complete(future)
def test_coro_next():
    """next() resolves to a coroutine callback's return value."""
    loop = asyncio.new_event_loop()
    @crontab('* * * * * *', loop=loop)
    async def t():
        return 1
    future = asyncio.ensure_future(t.next(), loop=loop)
    loop.run_until_complete(future)
    assert future.result() == 1
def test_coro_next_raise():
    """next() propagates an exception raised by a coroutine callback."""
    loop = asyncio.new_event_loop()
    @crontab('* * * * * *', loop=loop)
    async def t():
        raise CustomError()
    future = asyncio.ensure_future(t.next(), loop=loop)
    with pytest.raises(CustomError):
        loop.run_until_complete(future)
def test_next_dst(monkeypatch):
    """Consecutive per-minute runs stay 60s apart across the mocked
    CET/CEST daylight-saving fall-back boundary."""
    now = datetime.datetime.now()
    class mydatetime:
        # freeze "now" to just before the transition (next year, Oct 29,
        # 02:58:58) so get_next() straddles the DST switch
        @classmethod
        def now(cls, tzinfo=None):
            return datetime.datetime(
                now.year + 1, 10, 29, 2, 58, 58,
                tzinfo=tzinfo
            )
    monkeypatch.setattr('aiocron.datetime', mydatetime)
    monkeypatch.setattr('dateutil.tz.time.timezone', -3600)
    monkeypatch.setattr('dateutil.tz.time.altzone', -7200)
    monkeypatch.setattr('dateutil.tz.time.daylight', 1)
    monkeypatch.setattr('dateutil.tz.time.tzname', ('CET', 'CEST'))
    loop = asyncio.new_event_loop()
    t = crontab('* * * * *', loop=loop)
    t.initialize()
    # last hit in DST
    a = t.get_next()
    time.sleep(3)
    # first hit after DST
    b = t.get_next()
    assert b - a == 60
| 20.78 | 67 | 0.605711 |
import time
import datetime
from aiocron import asyncio
from aiocron import crontab
import pytest
class CustomError(Exception):
pass
def test_str():
loop = asyncio.new_event_loop()
@crontab('* * * * * *', loop=loop)
def t():
pass
assert '* * * * *' in str(t)
def test_cron():
loop = asyncio.new_event_loop()
future = asyncio.Future(loop=loop)
@crontab('* * * * * *', start=False, loop=loop)
def t():
future.set_result(1)
t.start()
loop.run_until_complete(future)
t.stop()
assert future.result() == 1
def test_raise():
loop = asyncio.new_event_loop()
future = asyncio.Future(loop=loop)
@crontab('* * * * * *', start=False, loop=loop)
def t():
loop.call_later(1, future.set_result, 1)
raise ValueError()
t.start()
loop.run_until_complete(future)
t.stop()
assert future.result() == 1
def test_next():
loop = asyncio.new_event_loop()
def t():
return 1
t = crontab('* * * * * *', func=t, loop=loop)
future = asyncio.ensure_future(t.next(), loop=loop)
loop.run_until_complete(future)
assert future.result() == 1
def test_null_callback():
loop = asyncio.new_event_loop()
t = crontab('* * * * * *', loop=loop)
assert t.handle is None
future = asyncio.ensure_future(t.next(4), loop=loop)
loop.run_until_complete(future)
assert future.result() == (4,)
def test_next_raise():
loop = asyncio.new_event_loop()
@crontab('* * * * * *', loop=loop)
def t():
raise CustomError()
future = asyncio.ensure_future(t.next(), loop=loop)
with pytest.raises(CustomError):
loop.run_until_complete(future)
def test_coro_next():
loop = asyncio.new_event_loop()
@crontab('* * * * * *', loop=loop)
async def t():
return 1
future = asyncio.ensure_future(t.next(), loop=loop)
loop.run_until_complete(future)
assert future.result() == 1
def test_coro_next_raise():
loop = asyncio.new_event_loop()
@crontab('* * * * * *', loop=loop)
async def t():
raise CustomError()
future = asyncio.ensure_future(t.next(), loop=loop)
with pytest.raises(CustomError):
loop.run_until_complete(future)
def test_next_dst(monkeypatch):
now = datetime.datetime.now()
class mydatetime:
@classmethod
def now(cls, tzinfo=None):
return datetime.datetime(
now.year + 1, 10, 29, 2, 58, 58,
tzinfo=tzinfo
)
monkeypatch.setattr('aiocron.datetime', mydatetime)
monkeypatch.setattr('dateutil.tz.time.timezone', -3600)
monkeypatch.setattr('dateutil.tz.time.altzone', -7200)
monkeypatch.setattr('dateutil.tz.time.daylight', 1)
monkeypatch.setattr('dateutil.tz.time.tzname', ('CET', 'CEST'))
loop = asyncio.new_event_loop()
t = crontab('* * * * *', loop=loop)
t.initialize()
a = t.get_next()
time.sleep(3)
b = t.get_next()
assert b - a == 60
| true | true |
f713f8f82b5a192645afd5b548efee29e36fd35a | 23,097 | py | Python | nova/tests/unit/test_fixtures.py | maya2250/nova | e483ca1cd9a5db5856f87fc69ca07c42d2be5def | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/test_fixtures.py | maya2250/nova | e483ca1cd9a5db5856f87fc69ca07c42d2be5def | [
"Apache-2.0"
] | 1 | 2021-03-31T19:35:21.000Z | 2021-03-31T19:35:21.000Z | nova/tests/unit/test_fixtures.py | Mattlk13/nova | 5b13eb59540aaf535a53920e783964d106de2620 | [
"Apache-2.0"
] | 1 | 2020-07-22T09:09:38.000Z | 2020-07-22T09:09:38.000Z | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import fixtures as fx
import futurist
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import uuidutils
from oslotest import output
import sqlalchemy
import testtools
from nova.compute import rpcapi as compute_rpcapi
from nova import conductor
from nova import context
from nova.db.sqlalchemy import api as session
from nova import exception
from nova.network import neutron as neutron_api
from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova import test
from nova.tests import fixtures
from nova.tests.unit import conf_fixture
from nova.tests.unit import fake_instance
from nova import utils
CONF = cfg.CONF  # module-level handle on the global oslo.config registry
class TestLogging(testtools.TestCase):
    """Exercise the StandardLogging fixture's handler and level setup."""
    def test_default_logging(self):
        """By default INFO is captured and DEBUG is swallowed, but debug
        messages are still interpolated (broken format strings explode)."""
        stdlog = self.useFixture(fixtures.StandardLogging())
        root = logging.getLogger()
        # there should be a null handler as well at DEBUG
        self.assertEqual(2, len(root.handlers), root.handlers)
        log = logging.getLogger(__name__)
        log.info("at info")
        log.debug("at debug")
        self.assertIn("at info", stdlog.logger.output)
        self.assertNotIn("at debug", stdlog.logger.output)
        # broken debug messages should still explode, even though we
        # aren't logging them in the regular handler
        self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")
        # and, ensure that one of the terrible log messages isn't
        # output at info
        warn_log = logging.getLogger('migrate.versioning.api')
        warn_log.info("warn_log at info, should be skipped")
        warn_log.error("warn_log at error")
        self.assertIn("warn_log at error", stdlog.logger.output)
        self.assertNotIn("warn_log at info", stdlog.logger.output)
    def test_debug_logging(self):
        """With OS_DEBUG set, DEBUG messages are captured too and the extra
        null handler is not installed."""
        self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
        stdlog = self.useFixture(fixtures.StandardLogging())
        root = logging.getLogger()
        # there should no longer be a null handler
        self.assertEqual(1, len(root.handlers), root.handlers)
        log = logging.getLogger(__name__)
        log.info("at info")
        log.debug("at debug")
        self.assertIn("at info", stdlog.logger.output)
        self.assertIn("at debug", stdlog.logger.output)
class TestOSAPIFixture(testtools.TestCase):
    """Sanity-check the OSAPIFixture by driving a real in-process API."""
    @mock.patch('nova.objects.Service.get_by_host_and_binary')
    @mock.patch('nova.objects.Service.create')
    def test_responds_to_version(self, mock_service_create, mock_get):
        """Ensure the OSAPI server responds to calls sensibly."""
        self.useFixture(output.CaptureOutput())
        self.useFixture(fixtures.StandardLogging())
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.RPCFixture('nova.test'))
        api = self.useFixture(fixtures.OSAPIFixture()).api
        # request the API root, which provides us the versions of the API
        resp = api.api_request('/', strip_version=True)
        self.assertEqual(200, resp.status_code, resp.content)
        # request a bad root url, should be a 404
        #
        # NOTE(sdague): this currently fails, as it falls into the 300
        # dispatcher instead. This is a bug. The test case is left in
        # here, commented out until we can address it.
        #
        # resp = api.api_request('/foo', strip_version=True)
        # self.assertEqual(resp.status_code, 400, resp.content)
        # request a known bad url, and we should get a 404
        resp = api.api_request('/foo')
        self.assertEqual(404, resp.status_code, resp.content)
class TestDatabaseFixture(testtools.TestCase):
    """Exercise reset and cleanup behavior of the Database fixture."""
    def test_fixture_reset(self):
        """Applying the fixture again must wipe rows from the main db."""
        # because this sets up reasonable db connection strings
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.Database())
        engine = session.get_engine()
        conn = engine.connect()
        result = conn.execute("select * from instance_types")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)
        # insert a 6th instance type, column 5 below is an int id
        # which has a constraint on it, so if new standard instance
        # types are added you have to bump it.
        conn.execute("insert into instance_types VALUES "
                     "(NULL, NULL, NULL, 't1.test', 6, 4096, 2, 0, NULL, '87'"
                     ", 1.0, 40, 0, 0, 1, 0)")
        result = conn.execute("select * from instance_types")
        rows = result.fetchall()
        self.assertEqual(1, len(rows), "Rows %s" % rows)
        # reset by invoking the fixture again
        #
        # NOTE(sdague): it's important to reestablish the db
        # connection because otherwise we have a reference to the old
        # in mem db.
        self.useFixture(fixtures.Database())
        conn = engine.connect()
        result = conn.execute("select * from instance_types")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)
    def test_api_fixture_reset(self):
        """Applying the fixture again must wipe rows from the api db."""
        # This sets up reasonable db connection strings
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.Database(database='api'))
        engine = session.get_api_engine()
        conn = engine.connect()
        result = conn.execute("select * from cell_mappings")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)
        uuid = uuidutils.generate_uuid()
        conn.execute("insert into cell_mappings (uuid, name) VALUES "
                     "('%s', 'fake-cell')" % (uuid,))
        result = conn.execute("select * from cell_mappings")
        rows = result.fetchall()
        self.assertEqual(1, len(rows), "Rows %s" % rows)
        # reset by invoking the fixture again
        #
        # NOTE(sdague): it's important to reestablish the db
        # connection because otherwise we have a reference to the old
        # in mem db.
        self.useFixture(fixtures.Database(database='api'))
        conn = engine.connect()
        result = conn.execute("select * from cell_mappings")
        rows = result.fetchall()
        self.assertEqual(0, len(rows), "Rows %s" % rows)
    def test_fixture_cleanup(self):
        """Cleanup must leave the main db schema completely empty."""
        # because this sets up reasonable db connection strings
        self.useFixture(conf_fixture.ConfFixture())
        fix = fixtures.Database()
        self.useFixture(fix)
        # manually do the cleanup that addCleanup will do
        fix.cleanup()
        # ensure the db contains nothing
        engine = session.get_engine()
        conn = engine.connect()
        schema = "".join(line for line in conn.connection.iterdump())
        self.assertEqual(schema, "BEGIN TRANSACTION;COMMIT;")
    def test_api_fixture_cleanup(self):
        """Cleanup must leave the api db schema completely empty."""
        # This sets up reasonable db connection strings
        self.useFixture(conf_fixture.ConfFixture())
        fix = fixtures.Database(database='api')
        self.useFixture(fix)
        # No data inserted by migrations so we need to add a row
        engine = session.get_api_engine()
        conn = engine.connect()
        uuid = uuidutils.generate_uuid()
        conn.execute("insert into cell_mappings (uuid, name) VALUES "
                     "('%s', 'fake-cell')" % (uuid,))
        result = conn.execute("select * from cell_mappings")
        rows = result.fetchall()
        self.assertEqual(1, len(rows), "Rows %s" % rows)
        # Manually do the cleanup that addCleanup will do
        fix.cleanup()
        # Ensure the db contains nothing
        engine = session.get_api_engine()
        conn = engine.connect()
        schema = "".join(line for line in conn.connection.iterdump())
        self.assertEqual("BEGIN TRANSACTION;COMMIT;", schema)
class TestDatabaseAtVersionFixture(testtools.TestCase):
    """Check that DatabaseAtVersion migrates the schema to a fixed point."""
    def test_fixture_schema_version(self):
        """The schema should match the requested migration version."""
        self.useFixture(conf_fixture.ConfFixture())
        # In/after 317 aggregates did have uuid
        self.useFixture(fixtures.DatabaseAtVersion(318))
        engine = session.get_engine()
        engine.connect()
        meta = sqlalchemy.MetaData(engine)
        aggregate = sqlalchemy.Table('aggregates', meta, autoload=True)
        self.assertTrue(hasattr(aggregate.c, 'uuid'))
        # Before 317, aggregates had no uuid
        self.useFixture(fixtures.DatabaseAtVersion(316))
        engine = session.get_engine()
        engine.connect()
        meta = sqlalchemy.MetaData(engine)
        aggregate = sqlalchemy.Table('aggregates', meta, autoload=True)
        self.assertFalse(hasattr(aggregate.c, 'uuid'))
        engine.dispose()
    def test_fixture_after_database_fixture(self):
        """The versioned fixture must stack cleanly on a Database fixture."""
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.Database())
        self.useFixture(fixtures.DatabaseAtVersion(318))
class TestDefaultFlavorsFixture(testtools.TestCase):
    """Check that DefaultFlavorsFixture populates the api db flavors."""
    @mock.patch("nova.objects.flavor.Flavor._send_notification")
    def test_flavors(self, mock_send_notification):
        """The fixture should add six default flavors to an empty db."""
        self.useFixture(conf_fixture.ConfFixture())
        self.useFixture(fixtures.Database())
        self.useFixture(fixtures.Database(database='api'))
        engine = session.get_api_engine()
        conn = engine.connect()
        result = conn.execute("select * from flavors")
        rows = result.fetchall()
        # the api database starts with no flavors at all
        self.assertEqual(0, len(rows), "Rows %s" % rows)
        self.useFixture(fixtures.DefaultFlavorsFixture())
        result = conn.execute("select * from flavors")
        rows = result.fetchall()
        # six standard flavors are created by the fixture
        self.assertEqual(6, len(rows), "Rows %s" % rows)
class TestIndirectionAPIFixture(testtools.TestCase):
    """Verify IndirectionAPIFixture sets and restores the indirection API."""

    def test_indirection_api(self):
        """The fixture installs a value; cleanup puts None back."""
        # nothing should be configured before the fixture runs
        self.assertIsNone(obj_base.NovaObject.indirection_api)
        indirection_fix = fixtures.IndirectionAPIFixture('foo')
        self.useFixture(indirection_fix)
        # while active, the configured value is visible on NovaObject
        self.assertEqual('foo', obj_base.NovaObject.indirection_api)
        # run the cleanup that addCleanup would trigger at test end
        indirection_fix.cleanup()
        # the original (None) value must be restored
        self.assertIsNone(obj_base.NovaObject.indirection_api)
class TestSpawnIsSynchronousFixture(testtools.TestCase):
    """Verify the fixture that makes utils.spawn/spawn_n run eagerly."""

    def test_spawn_patch(self):
        """utils.spawn_n is replaced while the fixture is active."""
        original = utils.spawn_n
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        self.assertNotEqual(original, utils.spawn_n)

    def test_spawn_passes_through(self):
        """Positional and keyword arguments reach the callable unchanged."""
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        target = mock.MagicMock()
        utils.spawn_n(target.function, 'foo', bar='bar')
        target.function.assert_called_once_with('foo', bar='bar')

    def test_spawn_return_has_wait(self):
        """The object returned by spawn supports wait()."""
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        thread = utils.spawn(lambda x: '%s' % x, 'foo')
        self.assertEqual('foo', thread.wait())

    def test_spawn_n_return_has_wait(self):
        """The object returned by spawn_n supports wait()."""
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        thread = utils.spawn_n(lambda x: '%s' % x, 'foo')
        self.assertEqual('foo', thread.wait())

    def test_spawn_has_link(self):
        """link() fires its callback with the thread and extra arg."""
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        thread = utils.spawn(mock.MagicMock)
        expected_param = 'test'
        calls = []

        def _link_callback(gt, param):
            self.assertEqual(thread, gt)
            self.assertEqual(expected_param, param)
            calls.append(1)

        thread.link(_link_callback, expected_param)
        # the callback ran exactly once, synchronously
        self.assertEqual(1, len(calls))

    def test_spawn_n_has_link(self):
        """link() also works on the spawn_n return value."""
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
        thread = utils.spawn_n(mock.MagicMock)
        expected_param = 'test'
        calls = []

        def _link_callback(gt, param):
            self.assertEqual(thread, gt)
            self.assertEqual(expected_param, param)
            calls.append(1)

        thread.link(_link_callback, expected_param)
        # the callback ran exactly once, synchronously
        self.assertEqual(1, len(calls))
class TestSynchronousThreadPoolExecutorFixture(testtools.TestCase):
    """Verify the fixture that makes GreenThreadPoolExecutor synchronous."""

    def test_submit_passes_through(self):
        """submit() runs the callable eagerly and exposes its result."""
        self.useFixture(fixtures.SynchronousThreadPoolExecutorFixture())
        target = mock.MagicMock()
        pool = futurist.GreenThreadPoolExecutor()
        future = pool.submit(target.function, 'foo', bar='bar')
        # the call already happened, during submit()
        target.function.assert_called_once_with('foo', bar='bar')
        # and the future resolves to the callable's return value
        self.assertEqual(target.function.return_value, future.result())
class TestBannedDBSchemaOperations(testtools.TestCase):
    """The BannedDBSchemaOperations fixture must block schema mutation."""

    def test_column(self):
        """drop/alter on a Column raise while 'Column' is banned."""
        column = sqlalchemy.Column()
        with fixtures.BannedDBSchemaOperations(['Column']):
            for banned_op in (column.drop, column.alter):
                self.assertRaises(exception.DBNotAllowed, banned_op)

    def test_table(self):
        """drop/alter on a Table raise while 'Table' is banned."""
        table = sqlalchemy.Table()
        with fixtures.BannedDBSchemaOperations(['Table']):
            for banned_op in (table.drop, table.alter):
                self.assertRaises(exception.DBNotAllowed, banned_op)
class TestAllServicesCurrentFixture(testtools.TestCase):
    """Check the fixture that pins all services to the current version."""
    @mock.patch('nova.objects.Service._db_service_get_minimum_version')
    def test_services_current(self, mock_db):
        """With the fixture, minimum-version checks skip the database."""
        # without the fixture, the minimum version comes from the db
        mock_db.return_value = {'nova-compute': 123}
        self.assertEqual(123, service_obj.Service.get_minimum_version(
            None, 'nova-compute'))
        mock_db.assert_called_once_with(None, ['nova-compute'],
                                        use_slave=False)
        mock_db.reset_mock()
        compute_rpcapi.LAST_VERSION = 123
        self.useFixture(fixtures.AllServicesCurrent())
        # the fixture clears any cached RPC version pin
        self.assertIsNone(compute_rpcapi.LAST_VERSION)
        self.assertEqual(service_obj.SERVICE_VERSION,
                         service_obj.Service.get_minimum_version(
                             None, 'nova-compute'))
        # and the db is never consulted while the fixture is active
        self.assertFalse(mock_db.called)
class TestNoopConductorFixture(testtools.TestCase):
    """Verify NoopConductorFixture turns conductor calls into no-ops."""

    @mock.patch('nova.conductor.api.ComputeTaskAPI.resize_instance')
    def test_task_api_not_called(self, mock_resize):
        """resize_instance on the task API must not reach the real code."""
        self.useFixture(fixtures.NoopConductorFixture())
        task_api = conductor.ComputeTaskAPI()
        task_api.resize_instance()
        self.assertFalse(mock_resize.called)

    @mock.patch('nova.conductor.api.API.wait_until_ready')
    def test_api_not_called(self, mock_wait):
        """wait_until_ready on the conductor API must not be invoked."""
        self.useFixture(fixtures.NoopConductorFixture())
        conductor_api = conductor.API()
        conductor_api.wait_until_ready()
        self.assertFalse(mock_wait.called)
class TestSingleCellSimpleFixture(testtools.TestCase):
    """Verify SingleCellSimple presents exactly one cell."""

    def test_single_cell(self):
        """Listing cell mappings yields a single entry."""
        self.useFixture(fixtures.SingleCellSimple())
        mappings = objects.CellMappingList.get_all(None)
        self.assertEqual(1, len(mappings))

    def test_target_cell(self):
        """target_cell is a pass-through returning the given context."""
        self.useFixture(fixtures.SingleCellSimple())
        with context.target_cell(mock.sentinel.context, None) as targeted:
            self.assertIs(mock.sentinel.context, targeted)
class TestWarningsFixture(test.TestCase):
    """Check that the WarningsFixture escalates selected warnings."""
    def test_invalid_uuid_errors(self):
        """Creating an oslo.versionedobject with an invalid UUID value for a
        UUIDField should raise an exception.
        """
        # a fully populated, valid Migration payload used as the baseline
        valid_migration_kwargs = {
            "created_at": timeutils.utcnow().replace(microsecond=0),
            "updated_at": None,
            "deleted_at": None,
            "deleted": False,
            "id": 123,
            "uuid": uuids.migration,
            "source_compute": "compute-source",
            "dest_compute": "compute-dest",
            "source_node": "node-source",
            "dest_node": "node-dest",
            "dest_host": "host-dest",
            "old_instance_type_id": 42,
            "new_instance_type_id": 84,
            "instance_uuid": "fake-uuid",
            "status": "migrating",
            "migration_type": "resize",
            "hidden": False,
            "memory_total": 123456,
            "memory_processed": 12345,
            "memory_remaining": 111111,
            "disk_total": 234567,
            "disk_processed": 23456,
            "disk_remaining": 211111,
        }
        # this shall not throw FutureWarning
        objects.migration.Migration(**valid_migration_kwargs)
        invalid_migration_kwargs = copy.deepcopy(valid_migration_kwargs)
        # break only the uuid field; the warnings fixture promotes the
        # resulting FutureWarning to an error so assertRaises can see it
        invalid_migration_kwargs["uuid"] = "fake_id"
        self.assertRaises(FutureWarning, objects.migration.Migration,
                          **invalid_migration_kwargs)
class TestDownCellFixture(test.TestCase):
    """Exercise DownCellFixture, which simulates unreachable cell dbs."""
    def test_fixture(self):
        """With no argument the fixture downs every cell."""
        # The test setup creates two cell mappings (cell0 and cell1) by
        # default. Let's first list servers across all cells while they are
        # "up" to make sure that works as expected. We'll create a single
        # instance in cell1.
        ctxt = context.get_admin_context()
        cell1 = self.cell_mappings[test.CELL1_NAME]
        with context.target_cell(ctxt, cell1) as cctxt:
            inst = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst:
                delattr(inst, 'id')
            inst.create()
        # Now list all instances from all cells (should get one back).
        results = context.scatter_gather_all_cells(
            ctxt, objects.InstanceList.get_all)
        self.assertEqual(2, len(results))
        self.assertEqual(0, len(results[objects.CellMapping.CELL0_UUID]))
        self.assertEqual(1, len(results[cell1.uuid]))
        # Now do the same but with the DownCellFixture which should result
        # in exception results from both cells.
        with fixtures.DownCellFixture():
            results = context.scatter_gather_all_cells(
                ctxt, objects.InstanceList.get_all)
        self.assertEqual(2, len(results))
        for result in results.values():
            self.assertIsInstance(result, db_exc.DBError)
    def test_fixture_when_explicitly_passing_down_cell_mappings(self):
        """Only the cells passed to the fixture should appear down."""
        # The test setup creates two cell mappings (cell0 and cell1) by
        # default. We'll create one instance per cell and pass cell0 as
        # the down cell. We should thus get db_exc.DBError for cell0 and
        # correct InstanceList object from cell1.
        ctxt = context.get_admin_context()
        cell0 = self.cell_mappings['cell0']
        cell1 = self.cell_mappings['cell1']
        with context.target_cell(ctxt, cell0) as cctxt:
            inst1 = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst1:
                delattr(inst1, 'id')
            inst1.create()
        with context.target_cell(ctxt, cell1) as cctxt:
            inst2 = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst2:
                delattr(inst2, 'id')
            inst2.create()
        with fixtures.DownCellFixture([cell0]):
            results = context.scatter_gather_all_cells(
                ctxt, objects.InstanceList.get_all)
        self.assertEqual(2, len(results))
        for cell_uuid, result in results.items():
            if cell_uuid == cell0.uuid:
                self.assertIsInstance(result, db_exc.DBError)
            else:
                self.assertIsInstance(result, objects.InstanceList)
                self.assertEqual(1, len(result))
                self.assertEqual(inst2.uuid, result[0].uuid)
    def test_fixture_for_an_individual_down_cell_targeted_call(self):
        """Targeted calls against a down cell raise; up cells succeed."""
        # We have cell0 and cell1 by default in the setup. We try targeting
        # both the cells. We should get a db error for the down cell and
        # the correct result for the up cell.
        ctxt = context.get_admin_context()
        cell0 = self.cell_mappings['cell0']
        cell1 = self.cell_mappings['cell1']
        with context.target_cell(ctxt, cell0) as cctxt:
            inst1 = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst1:
                delattr(inst1, 'id')
            inst1.create()
        with context.target_cell(ctxt, cell1) as cctxt:
            inst2 = fake_instance.fake_instance_obj(cctxt)
            if 'id' in inst2:
                delattr(inst2, 'id')
            inst2.create()
        def dummy_tester(ctxt, cell_mapping, uuid):
            # resolve the instance inside the targeted cell's context
            with context.target_cell(ctxt, cell_mapping) as cctxt:
                return objects.Instance.get_by_uuid(cctxt, uuid)
        # Scenario A: We do not pass any down cells, fixture automatically
        # assumes the targeted cell is down whether its cell0 or cell1.
        with fixtures.DownCellFixture():
            self.assertRaises(
                db_exc.DBError, dummy_tester, ctxt, cell1, inst2.uuid)
        # Scenario B: We pass cell0 as the down cell.
        with fixtures.DownCellFixture([cell0]):
            self.assertRaises(
                db_exc.DBError, dummy_tester, ctxt, cell0, inst1.uuid)
        # Scenario C: We get the correct result from the up cell
        # when targeted.
        result = dummy_tester(ctxt, cell1, inst2.uuid)
        self.assertEqual(inst2.uuid, result.uuid)
class TestNeutronFixture(test.NoDBTestCase):
    """Check NeutronFixture's handling of port resource requests."""
    def setUp(self):
        super(TestNeutronFixture, self).setUp()
        self.neutron = self.useFixture(fixtures.NeutronFixture(self))
    def _get_resource_request_port(self, ctxt):
        """Return the fixture's resource-request port as seen by *ctxt*.

        Both tests below listed ports and filtered for the same port id;
        the shared helper removes that duplication and asserts the port
        actually exists before indexing into the result.
        """
        client = neutron_api.get_client(ctxt)
        ports = client.list_ports(ctxt)['ports']
        port_id = self.neutron.port_with_resource_request['id']
        matching = [port for port in ports if port_id == port['id']]
        # the fixture defines exactly one such port; fail loudly
        # instead of raising IndexError if it ever goes missing
        self.assertEqual(1, len(matching))
        return matching[0]
    def test_list_ports_with_resource_request_non_admin_client(self):
        # non-admin clients must not see the resource_request attribute
        port = self._get_resource_request_port(context.get_context())
        self.assertIsNone(port['resource_request'])
    def test_list_ports_with_resource_request_admin_client(self):
        # admin clients get the full resource_request payload
        port = self._get_resource_request_port(context.get_admin_context())
        self.assertIsNotNone(port['resource_request'])
| 40.592267 | 78 | 0.650387 |
import copy
import fixtures as fx
import futurist
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import uuidutils
from oslotest import output
import sqlalchemy
import testtools
from nova.compute import rpcapi as compute_rpcapi
from nova import conductor
from nova import context
from nova.db.sqlalchemy import api as session
from nova import exception
from nova.network import neutron as neutron_api
from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova import test
from nova.tests import fixtures
from nova.tests.unit import conf_fixture
from nova.tests.unit import fake_instance
from nova import utils
CONF = cfg.CONF
class TestLogging(testtools.TestCase):
def test_default_logging(self):
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
self.assertEqual(2, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertNotIn("at debug", stdlog.logger.output)
self.assertRaises(TypeError, log.debug, "this is broken %s %s", "foo")
# and, ensure that one of the terrible log messages isn't
warn_log = logging.getLogger('migrate.versioning.api')
warn_log.info("warn_log at info, should be skipped")
warn_log.error("warn_log at error")
self.assertIn("warn_log at error", stdlog.logger.output)
self.assertNotIn("warn_log at info", stdlog.logger.output)
def test_debug_logging(self):
self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1'))
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
self.assertEqual(1, len(root.handlers), root.handlers)
log = logging.getLogger(__name__)
log.info("at info")
log.debug("at debug")
self.assertIn("at info", stdlog.logger.output)
self.assertIn("at debug", stdlog.logger.output)
class TestOSAPIFixture(testtools.TestCase):
@mock.patch('nova.objects.Service.get_by_host_and_binary')
@mock.patch('nova.objects.Service.create')
def test_responds_to_version(self, mock_service_create, mock_get):
self.useFixture(output.CaptureOutput())
self.useFixture(fixtures.StandardLogging())
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.RPCFixture('nova.test'))
api = self.useFixture(fixtures.OSAPIFixture()).api
resp = api.api_request('/', strip_version=True)
self.assertEqual(200, resp.status_code, resp.content)
resp = api.api_request('/foo')
self.assertEqual(404, resp.status_code, resp.content)
class TestDatabaseFixture(testtools.TestCase):
def test_fixture_reset(self):
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.Database())
engine = session.get_engine()
conn = engine.connect()
result = conn.execute("select * from instance_types")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
conn.execute("insert into instance_types VALUES "
"(NULL, NULL, NULL, 't1.test', 6, 4096, 2, 0, NULL, '87'"
", 1.0, 40, 0, 0, 1, 0)")
result = conn.execute("select * from instance_types")
rows = result.fetchall()
self.assertEqual(1, len(rows), "Rows %s" % rows)
# connection because otherwise we have a reference to the old
# in mem db.
self.useFixture(fixtures.Database())
conn = engine.connect()
result = conn.execute("select * from instance_types")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
def test_api_fixture_reset(self):
# This sets up reasonable db connection strings
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.Database(database='api'))
engine = session.get_api_engine()
conn = engine.connect()
result = conn.execute("select * from cell_mappings")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
uuid = uuidutils.generate_uuid()
conn.execute("insert into cell_mappings (uuid, name) VALUES "
"('%s', 'fake-cell')" % (uuid,))
result = conn.execute("select * from cell_mappings")
rows = result.fetchall()
self.assertEqual(1, len(rows), "Rows %s" % rows)
# reset by invoking the fixture again
#
# NOTE(sdague): it's important to reestablish the db
self.useFixture(fixtures.Database(database='api'))
conn = engine.connect()
result = conn.execute("select * from cell_mappings")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
def test_fixture_cleanup(self):
self.useFixture(conf_fixture.ConfFixture())
fix = fixtures.Database()
self.useFixture(fix)
fix.cleanup()
engine = session.get_engine()
conn = engine.connect()
schema = "".join(line for line in conn.connection.iterdump())
self.assertEqual(schema, "BEGIN TRANSACTION;COMMIT;")
def test_api_fixture_cleanup(self):
self.useFixture(conf_fixture.ConfFixture())
fix = fixtures.Database(database='api')
self.useFixture(fix)
engine = session.get_api_engine()
conn = engine.connect()
uuid = uuidutils.generate_uuid()
conn.execute("insert into cell_mappings (uuid, name) VALUES "
"('%s', 'fake-cell')" % (uuid,))
result = conn.execute("select * from cell_mappings")
rows = result.fetchall()
self.assertEqual(1, len(rows), "Rows %s" % rows)
fix.cleanup()
engine = session.get_api_engine()
conn = engine.connect()
schema = "".join(line for line in conn.connection.iterdump())
self.assertEqual("BEGIN TRANSACTION;COMMIT;", schema)
class TestDatabaseAtVersionFixture(testtools.TestCase):
def test_fixture_schema_version(self):
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.DatabaseAtVersion(318))
engine = session.get_engine()
engine.connect()
meta = sqlalchemy.MetaData(engine)
aggregate = sqlalchemy.Table('aggregates', meta, autoload=True)
self.assertTrue(hasattr(aggregate.c, 'uuid'))
self.useFixture(fixtures.DatabaseAtVersion(316))
engine = session.get_engine()
engine.connect()
meta = sqlalchemy.MetaData(engine)
aggregate = sqlalchemy.Table('aggregates', meta, autoload=True)
self.assertFalse(hasattr(aggregate.c, 'uuid'))
engine.dispose()
def test_fixture_after_database_fixture(self):
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.Database())
self.useFixture(fixtures.DatabaseAtVersion(318))
class TestDefaultFlavorsFixture(testtools.TestCase):
@mock.patch("nova.objects.flavor.Flavor._send_notification")
def test_flavors(self, mock_send_notification):
self.useFixture(conf_fixture.ConfFixture())
self.useFixture(fixtures.Database())
self.useFixture(fixtures.Database(database='api'))
engine = session.get_api_engine()
conn = engine.connect()
result = conn.execute("select * from flavors")
rows = result.fetchall()
self.assertEqual(0, len(rows), "Rows %s" % rows)
self.useFixture(fixtures.DefaultFlavorsFixture())
result = conn.execute("select * from flavors")
rows = result.fetchall()
self.assertEqual(6, len(rows), "Rows %s" % rows)
class TestIndirectionAPIFixture(testtools.TestCase):
def test_indirection_api(self):
self.assertIsNone(obj_base.NovaObject.indirection_api)
fix = fixtures.IndirectionAPIFixture('foo')
self.useFixture(fix)
self.assertEqual('foo', obj_base.NovaObject.indirection_api)
fix.cleanup()
self.assertIsNone(obj_base.NovaObject.indirection_api)
class TestSpawnIsSynchronousFixture(testtools.TestCase):
def test_spawn_patch(self):
orig_spawn = utils.spawn_n
fix = fixtures.SpawnIsSynchronousFixture()
self.useFixture(fix)
self.assertNotEqual(orig_spawn, utils.spawn_n)
def test_spawn_passes_through(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
tester = mock.MagicMock()
utils.spawn_n(tester.function, 'foo', bar='bar')
tester.function.assert_called_once_with('foo', bar='bar')
def test_spawn_return_has_wait(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
gt = utils.spawn(lambda x: '%s' % x, 'foo')
foo = gt.wait()
self.assertEqual('foo', foo)
def test_spawn_n_return_has_wait(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
gt = utils.spawn_n(lambda x: '%s' % x, 'foo')
foo = gt.wait()
self.assertEqual('foo', foo)
def test_spawn_has_link(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
gt = utils.spawn(mock.MagicMock)
passed_arg = 'test'
call_count = []
def fake(thread, param):
self.assertEqual(gt, thread)
self.assertEqual(passed_arg, param)
call_count.append(1)
gt.link(fake, passed_arg)
self.assertEqual(1, len(call_count))
def test_spawn_n_has_link(self):
self.useFixture(fixtures.SpawnIsSynchronousFixture())
gt = utils.spawn_n(mock.MagicMock)
passed_arg = 'test'
call_count = []
def fake(thread, param):
self.assertEqual(gt, thread)
self.assertEqual(passed_arg, param)
call_count.append(1)
gt.link(fake, passed_arg)
self.assertEqual(1, len(call_count))
class TestSynchronousThreadPoolExecutorFixture(testtools.TestCase):
def test_submit_passes_through(self):
self.useFixture(fixtures.SynchronousThreadPoolExecutorFixture())
tester = mock.MagicMock()
executor = futurist.GreenThreadPoolExecutor()
future = executor.submit(tester.function, 'foo', bar='bar')
tester.function.assert_called_once_with('foo', bar='bar')
result = future.result()
self.assertEqual(tester.function.return_value, result)
class TestBannedDBSchemaOperations(testtools.TestCase):
def test_column(self):
column = sqlalchemy.Column()
with fixtures.BannedDBSchemaOperations(['Column']):
self.assertRaises(exception.DBNotAllowed,
column.drop)
self.assertRaises(exception.DBNotAllowed,
column.alter)
def test_table(self):
table = sqlalchemy.Table()
with fixtures.BannedDBSchemaOperations(['Table']):
self.assertRaises(exception.DBNotAllowed,
table.drop)
self.assertRaises(exception.DBNotAllowed,
table.alter)
class TestAllServicesCurrentFixture(testtools.TestCase):
@mock.patch('nova.objects.Service._db_service_get_minimum_version')
def test_services_current(self, mock_db):
mock_db.return_value = {'nova-compute': 123}
self.assertEqual(123, service_obj.Service.get_minimum_version(
None, 'nova-compute'))
mock_db.assert_called_once_with(None, ['nova-compute'],
use_slave=False)
mock_db.reset_mock()
compute_rpcapi.LAST_VERSION = 123
self.useFixture(fixtures.AllServicesCurrent())
self.assertIsNone(compute_rpcapi.LAST_VERSION)
self.assertEqual(service_obj.SERVICE_VERSION,
service_obj.Service.get_minimum_version(
None, 'nova-compute'))
self.assertFalse(mock_db.called)
class TestNoopConductorFixture(testtools.TestCase):
@mock.patch('nova.conductor.api.ComputeTaskAPI.resize_instance')
def test_task_api_not_called(self, mock_resize):
self.useFixture(fixtures.NoopConductorFixture())
conductor.ComputeTaskAPI().resize_instance()
self.assertFalse(mock_resize.called)
@mock.patch('nova.conductor.api.API.wait_until_ready')
def test_api_not_called(self, mock_wait):
self.useFixture(fixtures.NoopConductorFixture())
conductor.API().wait_until_ready()
self.assertFalse(mock_wait.called)
class TestSingleCellSimpleFixture(testtools.TestCase):
def test_single_cell(self):
self.useFixture(fixtures.SingleCellSimple())
cml = objects.CellMappingList.get_all(None)
self.assertEqual(1, len(cml))
def test_target_cell(self):
self.useFixture(fixtures.SingleCellSimple())
with context.target_cell(mock.sentinel.context, None) as c:
self.assertIs(mock.sentinel.context, c)
class TestWarningsFixture(test.TestCase):
def test_invalid_uuid_errors(self):
valid_migration_kwargs = {
"created_at": timeutils.utcnow().replace(microsecond=0),
"updated_at": None,
"deleted_at": None,
"deleted": False,
"id": 123,
"uuid": uuids.migration,
"source_compute": "compute-source",
"dest_compute": "compute-dest",
"source_node": "node-source",
"dest_node": "node-dest",
"dest_host": "host-dest",
"old_instance_type_id": 42,
"new_instance_type_id": 84,
"instance_uuid": "fake-uuid",
"status": "migrating",
"migration_type": "resize",
"hidden": False,
"memory_total": 123456,
"memory_processed": 12345,
"memory_remaining": 111111,
"disk_total": 234567,
"disk_processed": 23456,
"disk_remaining": 211111,
}
objects.migration.Migration(**valid_migration_kwargs)
invalid_migration_kwargs = copy.deepcopy(valid_migration_kwargs)
invalid_migration_kwargs["uuid"] = "fake_id"
self.assertRaises(FutureWarning, objects.migration.Migration,
**invalid_migration_kwargs)
class TestDownCellFixture(test.TestCase):
def test_fixture(self):
# "up" to make sure that works as expected. We'll create a single
ctxt = context.get_admin_context()
cell1 = self.cell_mappings[test.CELL1_NAME]
with context.target_cell(ctxt, cell1) as cctxt:
inst = fake_instance.fake_instance_obj(cctxt)
if 'id' in inst:
delattr(inst, 'id')
inst.create()
results = context.scatter_gather_all_cells(
ctxt, objects.InstanceList.get_all)
self.assertEqual(2, len(results))
self.assertEqual(0, len(results[objects.CellMapping.CELL0_UUID]))
self.assertEqual(1, len(results[cell1.uuid]))
with fixtures.DownCellFixture():
results = context.scatter_gather_all_cells(
ctxt, objects.InstanceList.get_all)
self.assertEqual(2, len(results))
for result in results.values():
self.assertIsInstance(result, db_exc.DBError)
def test_fixture_when_explicitly_passing_down_cell_mappings(self):
# the down cell. We should thus get db_exc.DBError for cell0 and
# correct InstanceList object from cell1.
ctxt = context.get_admin_context()
cell0 = self.cell_mappings['cell0']
cell1 = self.cell_mappings['cell1']
with context.target_cell(ctxt, cell0) as cctxt:
inst1 = fake_instance.fake_instance_obj(cctxt)
if 'id' in inst1:
delattr(inst1, 'id')
inst1.create()
with context.target_cell(ctxt, cell1) as cctxt:
inst2 = fake_instance.fake_instance_obj(cctxt)
if 'id' in inst2:
delattr(inst2, 'id')
inst2.create()
with fixtures.DownCellFixture([cell0]):
results = context.scatter_gather_all_cells(
ctxt, objects.InstanceList.get_all)
self.assertEqual(2, len(results))
for cell_uuid, result in results.items():
if cell_uuid == cell0.uuid:
self.assertIsInstance(result, db_exc.DBError)
else:
self.assertIsInstance(result, objects.InstanceList)
self.assertEqual(1, len(result))
self.assertEqual(inst2.uuid, result[0].uuid)
def test_fixture_for_an_individual_down_cell_targeted_call(self):
# We have cell0 and cell1 by default in the setup. We try targeting
# both the cells. We should get a db error for the down cell and
# the correct result for the up cell.
ctxt = context.get_admin_context()
cell0 = self.cell_mappings['cell0']
cell1 = self.cell_mappings['cell1']
with context.target_cell(ctxt, cell0) as cctxt:
inst1 = fake_instance.fake_instance_obj(cctxt)
if 'id' in inst1:
delattr(inst1, 'id')
inst1.create()
with context.target_cell(ctxt, cell1) as cctxt:
inst2 = fake_instance.fake_instance_obj(cctxt)
if 'id' in inst2:
delattr(inst2, 'id')
inst2.create()
def dummy_tester(ctxt, cell_mapping, uuid):
with context.target_cell(ctxt, cell_mapping) as cctxt:
return objects.Instance.get_by_uuid(cctxt, uuid)
# Scenario A: We do not pass any down cells, fixture automatically
# assumes the targeted cell is down whether its cell0 or cell1.
with fixtures.DownCellFixture():
self.assertRaises(
db_exc.DBError, dummy_tester, ctxt, cell1, inst2.uuid)
# Scenario B: We pass cell0 as the down cell.
with fixtures.DownCellFixture([cell0]):
self.assertRaises(
db_exc.DBError, dummy_tester, ctxt, cell0, inst1.uuid)
# Scenario C: We get the correct result from the up cell
# when targeted.
result = dummy_tester(ctxt, cell1, inst2.uuid)
self.assertEqual(inst2.uuid, result.uuid)
class TestNeutronFixture(test.NoDBTestCase):
def setUp(self):
super(TestNeutronFixture, self).setUp()
self.neutron = self.useFixture(fixtures.NeutronFixture(self))
def test_list_ports_with_resource_request_non_admin_client(self):
ctxt = context.get_context()
client = neutron_api.get_client(ctxt)
ports = client.list_ports(ctxt)['ports']
port_id = self.neutron.port_with_resource_request['id']
ports = [port for port in ports if port_id == port['id']]
self.assertIsNone(ports[0]['resource_request'])
def test_list_ports_with_resource_request_admin_client(self):
ctxt = context.get_admin_context()
client = neutron_api.get_client(ctxt)
ports = client.list_ports(ctxt)['ports']
port_id = self.neutron.port_with_resource_request['id']
ports = [port for port in ports if port_id == port['id']]
self.assertIsNotNone(ports[0]['resource_request'])
| true | true |
f713fb2b521629818719bab00598f5172b4dfd6d | 58,015 | py | Python | sphinx/builders/html/__init__.py | SamB/sphinx | bf010790ace78ba4bc4231445e73bcecf97e4947 | [
"BSD-2-Clause"
] | null | null | null | sphinx/builders/html/__init__.py | SamB/sphinx | bf010790ace78ba4bc4231445e73bcecf97e4947 | [
"BSD-2-Clause"
] | null | null | null | sphinx/builders/html/__init__.py | SamB/sphinx | bf010790ace78ba4bc4231445e73bcecf97e4947 | [
"BSD-2-Clause"
] | null | null | null | """Several HTML builders."""
import html
import os
import posixpath
import re
import sys
from datetime import datetime
from os import path
from typing import IO, Any, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Type
from urllib.parse import quote
from docutils import nodes
from docutils.core import publish_parts
from docutils.frontend import OptionParser
from docutils.io import DocTreeInput, StringOutput
from docutils.nodes import Node
from docutils.utils import relative_path
from sphinx import __display_version__, package_dir
from sphinx import version_info as sphinx_version
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.config import ENUM, Config
from sphinx.domains import Domain, Index, IndexEntry
from sphinx.environment.adapters.asset import ImageAdapter
from sphinx.environment.adapters.indexentries import IndexEntries
from sphinx.environment.adapters.toctree import TocTree
from sphinx.errors import ConfigError, ThemeError
from sphinx.highlighting import PygmentsBridge
from sphinx.locale import _, __
from sphinx.search import js_index
from sphinx.theming import HTMLThemeFactory
from sphinx.util import isurl, logging, md5, progress_message, status_iterator
from sphinx.util.docutils import is_html5_writer_available, new_document
from sphinx.util.fileutil import copy_asset
from sphinx.util.i18n import format_date
from sphinx.util.inventory import InventoryFile
from sphinx.util.matching import DOTFILES, Matcher, patmatch
from sphinx.util.osutil import copyfile, ensuredir, os_path, relative_uri
from sphinx.util.tags import Tags
from sphinx.writers.html import HTMLTranslator, HTMLWriter
# Probe once at import time whether docutils provides an HTML5 writer;
# ``html5_ready`` gates the translator choice in StandaloneHTMLBuilder.
if is_html5_writer_available():
    from sphinx.writers.html5 import HTML5Translator
    html5_ready = True
else:
    html5_ready = False
#: the filename for the inventory of objects
INVENTORY_FILENAME = 'objects.inv'
#: module-level logger for this builder package
logger = logging.getLogger(__name__)
#: matches runs of CR/LF; used to strip line breaks out of config.release
return_codes_re = re.compile('[\r\n]+')
def get_stable_hash(obj: Any) -> str:
    """Return a hash of *obj* that is stable across interpreter runs.

    ``md5(str(obj))`` alone would not do: dict item order depends on hash
    randomization, so equal dicts could stringify differently.  Containers
    are therefore reduced to a sorted list of their members' hashes before
    being hashed themselves.
    """
    if isinstance(obj, dict):
        # reduce the mapping to its item list; the branch below sorts it
        return get_stable_hash(list(obj.items()))
    if isinstance(obj, (list, tuple)):
        obj = sorted(get_stable_hash(member) for member in obj)
    return md5(str(obj).encode()).hexdigest()
def convert_locale_to_language_tag(locale: Optional[str]) -> Optional[str]:
    """Translate a POSIX locale name into a BCP 47 language tag.

    For example ``en_US`` becomes ``en-US`` (:rfc:`5646`).  Empty or
    missing locales yield ``None``.
    """
    if not locale:
        return None
    return locale.replace('_', '-')
class Stylesheet(str):
    """Metadata describing one CSS file.

    Instances behave like their filename (they subclass ``str``) so that
    old themes expecting a plain path keep working.
    """
    attributes: Dict[str, str] = None
    filename: str = None
    priority: int = None

    def __new__(cls, filename: str, *args: str, priority: int = 500, **attributes: Any
                ) -> "Stylesheet":
        obj = str.__new__(cls, filename)
        obj.filename = filename
        obj.priority = priority
        obj.attributes = attributes
        attributes.setdefault('rel', 'stylesheet')
        attributes.setdefault('type', 'text/css')
        if args:
            # legacy positional form: Stylesheet(filename, rel, title)
            attributes['rel'] = args[0]
            attributes['title'] = args[1]
        return obj
class JavaScript(str):
    """Metadata describing one JavaScript file.

    Instances behave like their filename (they subclass ``str``) so that
    old themes expecting a plain path keep working.
    """
    attributes: Dict[str, str] = None
    filename: str = None
    priority: int = None

    def __new__(cls, filename: str, priority: int = 500, **attributes: str) -> "JavaScript":
        obj = str.__new__(cls, filename)
        obj.filename = filename
        obj.priority = priority
        obj.attributes = attributes
        return obj
class BuildInfo:
    """Manipulator for the ``.buildinfo`` file.

    HTMLBuilder and its family store hashes of the configuration and tags
    used for a build in ``.buildinfo``; the next build compares the stored
    hashes with the current ones to decide whether a full rebuild is needed.
    """
    @classmethod
    def load(cls, f: IO) -> "BuildInfo":
        """Parse a ``.buildinfo`` stream written by :meth:`dump`.

        Raises ``ValueError`` when the stream does not have the expected
        four-line "version 1" layout.
        """
        try:
            lines = f.readlines()
            assert lines[0].rstrip() == '# Sphinx build info version 1'
            assert lines[2].startswith('config: ')
            assert lines[3].startswith('tags: ')
            build_info = BuildInfo()
            build_info.config_hash = lines[2].split()[1].strip()
            build_info.tags_hash = lines[3].split()[1].strip()
            return build_info
        except Exception as exc:
            raise ValueError(__('build info file is broken: %r') % exc) from exc

    def __init__(self, config: Config = None, tags: Tags = None,
                 config_categories: List[str] = None) -> None:
        # NOTE: the previous signature used a mutable default list for
        # *config_categories*; a ``None`` sentinel avoids the shared-state
        # pitfall while staying call-compatible.
        self.config_hash = ''
        self.tags_hash = ''
        if config:
            # hash only the config values belonging to the given categories
            values = {c.name: c.value for c in config.filter(config_categories or [])}
            self.config_hash = get_stable_hash(values)
        if tags:
            self.tags_hash = get_stable_hash(sorted(tags))

    def __eq__(self, other: "BuildInfo") -> bool:  # type: ignore
        # equality means: same configuration and same tags were used
        return (self.config_hash == other.config_hash and
                self.tags_hash == other.tags_hash)

    def dump(self, f: IO) -> None:
        """Write this build info to *f* in the "version 1" format."""
        f.write('# Sphinx build info version 1\n'
                '# This file hashes the configuration used when building these files.'
                ' When it is not found, a full rebuild will be done.\n'
                'config: %s\n'
                'tags: %s\n' %
                (self.config_hash, self.tags_hash))
class StandaloneHTMLBuilder(Builder):
    """
    Builds standalone HTML docs.
    """
    name = 'html'
    format = 'html'
    epilog = __('The HTML pages are in %(outdir)s.')
    copysource = True
    allow_parallel = True
    out_suffix = '.html'
    link_suffix = '.html'  # defaults to matching out_suffix
    # serialization module used for the search index dump
    indexer_format: Any = js_index
    indexer_dumps_unicode = True
    # create links to original images from images [True/False]
    html_scaled_image_link = True
    supported_image_types = ['image/svg+xml', 'image/png',
                             'image/gif', 'image/jpeg']
    supported_remote_images = True
    supported_data_uri_images = True
    searchindex_filename = 'searchindex.js'
    add_permalinks = True
    allow_sharp_as_current_path = True
    embedded = False  # for things like HTML help or Qt help: suppresses sidebar
    search = True  # for things like HTML help and Apple help: suppress search
    use_index = False
    download_support = True  # enable download role
    # relative URI from the page being written to the image dir (set per document)
    imgpath: str = None
    # (indexname, indexcls, content, collapse) tuples collected in prepare_writing()
    domain_indices: List[Tuple[str, Type[Index], List[Tuple[str, List[IndexEntry]]], bool]] = []  # NOQA
    def __init__(self, app: Sphinx) -> None:
        super().__init__(app)
        # stylesheets for every page (populated in init_css_files)
        self.css_files: List[Stylesheet] = []
        # scripts for every page (populated in init_js_files)
        self.script_files: List[JavaScript] = []
    def init(self) -> None:
        """Initialize per-build state: templates, highlighter, assets, suffixes."""
        self.build_info = self.create_build_info()
        # basename of images directory
        self.imagedir = '_images'
        # section numbers for headings in the currently visited document
        self.secnumbers: Dict[str, Tuple[int, ...]] = {}
        # currently written docname
        self.current_docname: str = None
        self.init_templates()
        self.init_highlighter()
        self.init_css_files()
        self.init_js_files()
        html_file_suffix = self.get_builder_config('file_suffix', 'html')
        if html_file_suffix is not None:
            self.out_suffix = html_file_suffix
        html_link_suffix = self.get_builder_config('link_suffix', 'html')
        if html_link_suffix is not None:
            self.link_suffix = html_link_suffix
        else:
            # links default to the same suffix as the generated files
            self.link_suffix = self.out_suffix
        self.use_index = self.get_builder_config('use_index', 'html')
def create_build_info(self) -> BuildInfo:
return BuildInfo(self.config, self.tags, ['html'])
def _get_translations_js(self) -> str:
candidates = [path.join(dir, self.config.language,
'LC_MESSAGES', 'sphinx.js')
for dir in self.config.locale_dirs] + \
[path.join(package_dir, 'locale', self.config.language,
'LC_MESSAGES', 'sphinx.js'),
path.join(sys.prefix, 'share/sphinx/locale',
self.config.language, 'sphinx.js')]
for jsfile in candidates:
if path.isfile(jsfile):
return jsfile
return None
def _get_style_filename(self) -> str:
if self.config.html_style is not None:
return self.config.html_style
elif self.theme:
return self.theme.get_config('theme', 'stylesheet')
else:
return 'default.css'
def get_theme_config(self) -> Tuple[str, Dict]:
return self.config.html_theme, self.config.html_theme_options
def init_templates(self) -> None:
theme_factory = HTMLThemeFactory(self.app)
themename, themeoptions = self.get_theme_config()
self.theme = theme_factory.create(themename)
self.theme_options = themeoptions.copy()
self.create_template_bridge()
self.templates.init(self, self.theme)
    def init_highlighter(self) -> None:
        """Set up the Pygments highlighter (and an optional dark-mode variant)."""
        # determine Pygments style and create the highlighter
        if self.config.pygments_style is not None:
            style = self.config.pygments_style
        elif self.theme:
            style = self.theme.get_config('theme', 'pygments_style', 'none')
        else:
            style = 'sphinx'
        self.highlighter = PygmentsBridge('html', style)
        if self.theme:
            dark_style = self.theme.get_config('theme', 'pygments_dark_style', None)
        else:
            dark_style = None
        if dark_style is not None:
            self.dark_highlighter = PygmentsBridge('html', dark_style)
            # serve the dark stylesheet only when the UA prefers a dark scheme
            self.app.add_css_file('pygments_dark.css',
                                  media='(prefers-color-scheme: dark)',
                                  id='pygments_dark_css')
        else:
            self.dark_highlighter = None
    def init_css_files(self) -> None:
        """Collect CSS files from the builder, extensions and user config."""
        self.css_files = []
        # priority 200: builder-level stylesheets come before extension ones
        self.add_css_file('pygments.css', priority=200)
        self.add_css_file(self._get_style_filename(), priority=200)
        for filename, attrs in self.app.registry.css_files:
            self.add_css_file(filename, **attrs)
        for filename, attrs in self.get_builder_config('css_files', 'html'):
            attrs.setdefault('priority', 800)  # User's CSSs are loaded after extensions'
            self.add_css_file(filename, **attrs)
def add_css_file(self, filename: str, **kwargs: Any) -> None:
if '://' not in filename:
filename = posixpath.join('_static', filename)
self.css_files.append(Stylesheet(filename, **kwargs))
    def init_js_files(self) -> None:
        """Collect JavaScript files from the builder, extensions and user config."""
        self.script_files = []
        self.add_js_file('documentation_options.js', id="documentation_options",
                         data_url_root='', priority=200)
        # Remove frameworks and compatability module below in Sphinx 6.0
        # xref RemovedInSphinx60Warning
        self.add_js_file('jquery.js', priority=200)
        self.add_js_file('underscore.js', priority=200)
        self.add_js_file('_sphinx_javascript_frameworks_compat.js', priority=200)
        self.add_js_file('doctools.js', priority=200)
        for filename, attrs in self.app.registry.js_files:
            self.add_js_file(filename, **attrs)
        for filename, attrs in self.get_builder_config('js_files', 'html'):
            attrs.setdefault('priority', 800)  # User's JSs are loaded after extensions'
            self.add_js_file(filename, **attrs)
        if self._get_translations_js():
            # a compiled message catalog exists for the configured language
            self.add_js_file('translations.js')
def add_js_file(self, filename: str, **kwargs: Any) -> None:
if filename and '://' not in filename:
filename = posixpath.join('_static', filename)
self.script_files.append(JavaScript(filename, **kwargs))
@property
def default_translator_class(self) -> Type[nodes.NodeVisitor]: # type: ignore
if not html5_ready or self.config.html4_writer:
return HTMLTranslator
else:
return HTML5Translator
@property
def math_renderer_name(self) -> str:
name = self.get_builder_config('math_renderer', 'html')
if name is not None:
# use given name
return name
else:
# not given: choose a math_renderer from registered ones as possible
renderers = list(self.app.registry.html_inline_math_renderers)
if len(renderers) == 1:
# only default math_renderer (mathjax) is registered
return renderers[0]
elif len(renderers) == 2:
# default and another math_renderer are registered; prior the another
renderers.remove('mathjax')
return renderers[0]
else:
# many math_renderers are registered. can't choose automatically!
return None
    def get_outdated_docs(self) -> Iterator[str]:
        """Yield docnames that need rebuilding.

        Everything is outdated when the stored ``.buildinfo`` hashes do not
        match the current ones; otherwise a document is outdated when its
        source (or the newest template) is newer than its output file.
        """
        try:
            with open(path.join(self.outdir, '.buildinfo')) as fp:
                buildinfo = BuildInfo.load(fp)
            if self.build_info != buildinfo:
                logger.debug('[build target] did not match: build_info ')
                yield from self.env.found_docs
                return
        except ValueError as exc:
            logger.warning(__('Failed to read build info file: %r'), exc)
        except OSError:
            # ignore errors on reading
            pass
        if self.templates:
            template_mtime = self.templates.newest_template_mtime()
        else:
            template_mtime = 0
        for docname in self.env.found_docs:
            if docname not in self.env.all_docs:
                logger.debug('[build target] did not in env: %r', docname)
                yield docname
                continue
            targetname = self.get_outfilename(docname)
            try:
                targetmtime = path.getmtime(targetname)
            except Exception:
                # missing/unreadable output counts as infinitely old
                targetmtime = 0
            try:
                srcmtime = max(path.getmtime(self.env.doc2path(docname)),
                               template_mtime)
                if srcmtime > targetmtime:
                    logger.debug(
                        '[build target] targetname %r(%s), template(%s), docname %r(%s)',
                        targetname,
                        datetime.utcfromtimestamp(targetmtime),
                        datetime.utcfromtimestamp(template_mtime),
                        docname,
                        datetime.utcfromtimestamp(path.getmtime(self.env.doc2path(docname))),
                    )
                    yield docname
            except OSError:
                # source doesn't exist anymore
                pass
def get_asset_paths(self) -> List[str]:
return self.config.html_extra_path + self.config.html_static_path
    def render_partial(self, node: Node) -> Dict[str, str]:
        """Utility: Render a lone doctree node.

        Returns the docutils ``publish_parts`` mapping; ``None`` input
        yields an empty ``fragment``.
        """
        if node is None:
            return {'fragment': ''}
        doc = new_document('<partial node>')
        doc.append(node)
        writer = HTMLWriter(self)
        return publish_parts(reader_name='doctree',
                             writer=writer,
                             source_class=DocTreeInput,
                             settings_overrides={'output_encoding': 'unicode'},
                             source=doc)
    def prepare_writing(self, docnames: Set[str]) -> None:
        """Set up the writer, search indexer, domain indices and the global
        template context shared by every rendered page."""
        # create the search indexer
        self.indexer = None
        if self.search:
            from sphinx.search import IndexBuilder
            lang = self.config.html_search_language or self.config.language
            self.indexer = IndexBuilder(self.env, lang,
                                        self.config.html_search_options,
                                        self.config.html_search_scorer)
            self.load_indexer(docnames)
        self.docwriter = HTMLWriter(self)
        self.docsettings: Any = OptionParser(
            defaults=self.env.settings,
            components=(self.docwriter,),
            read_config_files=True).get_default_values()
        self.docsettings.compact_lists = bool(self.config.html_compact_lists)
        # determine the additional indices to include
        self.domain_indices = []
        # html_domain_indices can be False/True or a list of index names
        indices_config = self.config.html_domain_indices
        if indices_config:
            for domain_name in sorted(self.env.domains):
                domain: Domain = self.env.domains[domain_name]
                for indexcls in domain.indices:
                    indexname = '%s-%s' % (domain.name, indexcls.name)
                    if isinstance(indices_config, list):
                        if indexname not in indices_config:
                            continue
                    content, collapse = indexcls(domain).generate()
                    if content:
                        self.domain_indices.append(
                            (indexname, indexcls, content, collapse))
        # format the "last updated on" string, only once is enough since it
        # typically doesn't include the time of day
        lufmt = self.config.html_last_updated_fmt
        if lufmt is not None:
            self.last_updated = format_date(lufmt or _('%b %d, %Y'),
                                            language=self.config.language)
        else:
            self.last_updated = None
        # If the logo or favicon are urls, keep them as-is, otherwise
        # strip the relative path as the files will be copied into _static.
        logo = self.config.html_logo or ''
        favicon = self.config.html_favicon or ''
        if not isurl(logo):
            logo = path.basename(logo)
        if not isurl(favicon):
            favicon = path.basename(favicon)
        self.relations = self.env.collect_relations()
        rellinks: List[Tuple[str, str, str, str]] = []
        if self.use_index:
            rellinks.append(('genindex', _('General Index'), 'I', _('index')))
        for indexname, indexcls, _content, _collapse in self.domain_indices:
            # if it has a short name
            if indexcls.shortname:
                rellinks.append((indexname, indexcls.localname,
                                 '', indexcls.shortname))
        # back up script_files and css_files to allow adding JS/CSS files to a specific page.
        self._script_files = list(self.script_files)
        self._css_files = list(self.css_files)
        self.globalcontext = {
            'embedded': self.embedded,
            'project': self.config.project,
            'release': return_codes_re.sub('', self.config.release),
            'version': self.config.version,
            'last_updated': self.last_updated,
            'copyright': self.config.copyright,
            'master_doc': self.config.root_doc,
            'root_doc': self.config.root_doc,
            'use_opensearch': self.config.html_use_opensearch,
            'docstitle': self.config.html_title,
            'shorttitle': self.config.html_short_title,
            'show_copyright': self.config.html_show_copyright,
            'show_search_summary': self.config.html_show_search_summary,
            'show_sphinx': self.config.html_show_sphinx,
            'has_source': self.config.html_copy_source,
            'show_source': self.config.html_show_sourcelink,
            'sourcelink_suffix': self.config.html_sourcelink_suffix,
            'file_suffix': self.out_suffix,
            'link_suffix': self.link_suffix,
            'script_files': self.script_files,
            'language': convert_locale_to_language_tag(self.config.language),
            'css_files': self.css_files,
            'sphinx_version': __display_version__,
            'sphinx_version_tuple': sphinx_version,
            'style': self._get_style_filename(),
            'rellinks': rellinks,
            'builder': self.name,
            'parents': [],
            'logo': logo,
            'favicon': favicon,
            'html5_doctype': html5_ready and not self.config.html4_writer,
        }
        if self.theme:
            # expose theme options to templates under a theme_ prefix
            self.globalcontext.update(
                ('theme_' + key, val) for (key, val) in
                self.theme.get_options(self.theme_options).items())
        self.globalcontext.update(self.config.html_context)
    def get_doc_context(self, docname: str, body: str, metatags: str) -> Dict[str, Any]:
        """Collect items for the template context of a page.

        Builds prev/next/parents relations, the rendered title, the source
        filename for the "show source" link and the local TOC fragment.
        """
        # find out relations
        prev = next = None
        parents = []
        rellinks = self.globalcontext['rellinks'][:]
        related = self.relations.get(docname)
        titles = self.env.titles
        if related and related[2]:
            try:
                next = {
                    'link': self.get_relative_uri(docname, related[2]),
                    'title': self.render_partial(titles[related[2]])['title']
                }
                rellinks.append((related[2], next['title'], 'N', _('next')))
            except KeyError:
                next = None
        if related and related[1]:
            try:
                prev = {
                    'link': self.get_relative_uri(docname, related[1]),
                    'title': self.render_partial(titles[related[1]])['title']
                }
                rellinks.append((related[1], prev['title'], 'P', _('previous')))
            except KeyError:
                # the relation is (somehow) not in the TOC tree, handle
                # that gracefully
                prev = None
        while related and related[0]:
            try:
                parents.append(
                    {'link': self.get_relative_uri(docname, related[0]),
                     'title': self.render_partial(titles[related[0]])['title']})
            except KeyError:
                pass
            related = self.relations.get(related[0])
        if parents:
            # remove link to the master file; we have a generic
            # "back to index" link already
            parents.pop()
        parents.reverse()
        # title rendered as HTML
        title_node = self.env.longtitles.get(docname)
        title = self.render_partial(title_node)['title'] if title_node else ''
        # Suffix for the document
        source_suffix = self.env.doc2path(docname, False)[len(docname):]
        # the name for the copied source
        if self.config.html_copy_source:
            sourcename = docname + source_suffix
            if source_suffix != self.config.html_sourcelink_suffix:
                sourcename += self.config.html_sourcelink_suffix
        else:
            sourcename = ''
        # metadata for the document
        meta = self.env.metadata.get(docname)
        # local TOC and global TOC tree
        self_toc = TocTree(self.env).get_toc_for(docname, self)
        toc = self.render_partial(self_toc)['fragment']
        return {
            'parents': parents,
            'prev': prev,
            'next': next,
            'title': title,
            'meta': meta,
            'body': body,
            'metatags': metatags,
            'rellinks': rellinks,
            'sourcename': sourcename,
            'toc': toc,
            # only display a TOC if there's more than one item to show
            'display_toc': (self.env.toc_num_entries[docname] > 1),
            'page_source_suffix': source_suffix,
        }
    def write_doc(self, docname: str, doctree: nodes.document) -> None:
        """Translate *doctree* to HTML and render it through the page template."""
        destination = StringOutput(encoding='utf-8')
        doctree.settings = self.docsettings
        self.secnumbers = self.env.toc_secnumbers.get(docname, {})
        self.fignumbers = self.env.toc_fignumbers.get(docname, {})
        # image/download URIs are relative to the page being written
        self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
        self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads')
        self.current_docname = docname
        self.docwriter.write(doctree, destination)
        self.docwriter.assemble_parts()
        body = self.docwriter.parts['fragment']
        metatags = self.docwriter.clean_meta
        ctx = self.get_doc_context(docname, body, metatags)
        self.handle_page(docname, ctx, event_arg=doctree)
    def write_doc_serialized(self, docname: str, doctree: nodes.document) -> None:
        """Serialized-build hook: resolve images and feed the search indexer."""
        self.imgpath = relative_uri(self.get_target_uri(docname), self.imagedir)
        self.post_process_images(doctree)
        title_node = self.env.longtitles.get(docname)
        title = self.render_partial(title_node)['title'] if title_node else ''
        self.index_page(docname, doctree, title)
    def finish(self) -> None:
        """Queue the post-build tasks (indices, extra pages, assets, buildinfo)."""
        self.finish_tasks.add_task(self.gen_indices)
        self.finish_tasks.add_task(self.gen_pages_from_extensions)
        self.finish_tasks.add_task(self.gen_additional_pages)
        self.finish_tasks.add_task(self.copy_image_files)
        self.finish_tasks.add_task(self.copy_download_files)
        self.finish_tasks.add_task(self.copy_static_files)
        self.finish_tasks.add_task(self.copy_extra_files)
        self.finish_tasks.add_task(self.write_buildinfo)
        # dump the search index
        self.handle_finish()
    @progress_message(__('generating indices'))
    def gen_indices(self) -> None:
        """Write the general index and all domain-specific indices."""
        # the global general index
        if self.use_index:
            self.write_genindex()
        # the global domain-specific indices
        self.write_domain_indices()
def gen_pages_from_extensions(self) -> None:
# pages from extensions
for pagelist in self.events.emit('html-collect-pages'):
for pagename, context, template in pagelist:
self.handle_page(pagename, context, template)
    @progress_message(__('writing additional pages'))
    def gen_additional_pages(self) -> None:
        """Render ``html_additional_pages``, the search page and opensearch XML."""
        # additional pages from conf.py
        for pagename, template in self.config.html_additional_pages.items():
            logger.info(pagename + ' ', nonl=True)
            self.handle_page(pagename, {}, template)
        # the search page
        if self.search:
            logger.info('search ', nonl=True)
            self.handle_page('search', {}, 'search.html')
        # the opensearch xml file
        if self.config.html_use_opensearch and self.search:
            logger.info('opensearch ', nonl=True)
            fn = path.join(self.outdir, '_static', 'opensearch.xml')
            self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
    def write_genindex(self) -> None:
        """Render the general index, optionally split per initial letter."""
        # the total count of lines for each index letter, used to distribute
        # the entries into two columns
        genindex = IndexEntries(self.env).create_index(self)
        indexcounts = []
        for _k, entries in genindex:
            indexcounts.append(sum(1 + len(subitems)
                                   for _, (_, subitems, _) in entries))
        genindexcontext = {
            'genindexentries': genindex,
            'genindexcounts': indexcounts,
            'split_index': self.config.html_split_index,
        }
        logger.info('genindex ', nonl=True)
        if self.config.html_split_index:
            # one overview page, one all-in-one page, and one page per letter
            self.handle_page('genindex', genindexcontext,
                             'genindex-split.html')
            self.handle_page('genindex-all', genindexcontext,
                             'genindex.html')
            for (key, entries), count in zip(genindex, indexcounts):
                ctx = {'key': key, 'entries': entries, 'count': count,
                       'genindexentries': genindex}
                self.handle_page('genindex-' + key, ctx,
                                 'genindex-single.html')
        else:
            self.handle_page('genindex', genindexcontext, 'genindex.html')
def write_domain_indices(self) -> None:
for indexname, indexcls, content, collapse in self.domain_indices:
indexcontext = {
'indextitle': indexcls.localname,
'content': content,
'collapse_index': collapse,
}
logger.info(indexname + ' ', nonl=True)
self.handle_page(indexname, indexcontext, 'domainindex.html')
    def copy_image_files(self) -> None:
        """Copy all referenced images into the output image directory."""
        if self.images:
            stringify_func = ImageAdapter(self.app.env).get_original_image_uri
            ensuredir(path.join(self.outdir, self.imagedir))
            for src in status_iterator(self.images, __('copying images... '), "brown",
                                       len(self.images), self.app.verbosity,
                                       stringify_func=stringify_func):
                dest = self.images[src]
                try:
                    copyfile(path.join(self.srcdir, src),
                             path.join(self.outdir, self.imagedir, dest))
                except Exception as err:
                    # a failed copy is reported but does not abort the build
                    logger.warning(__('cannot copy image file %r: %s'),
                                   path.join(self.srcdir, src), err)
    def copy_download_files(self) -> None:
        """Copy files referenced via the ``download`` role into ``_downloads``."""
        def to_relpath(f: str) -> str:
            # display paths relative to the source dir in the progress output
            return relative_path(self.srcdir, f)
        # copy downloadable files
        if self.env.dlfiles:
            ensuredir(path.join(self.outdir, '_downloads'))
            for src in status_iterator(self.env.dlfiles, __('copying downloadable files... '),
                                       "brown", len(self.env.dlfiles), self.app.verbosity,
                                       stringify_func=to_relpath):
                try:
                    dest = path.join(self.outdir, '_downloads', self.env.dlfiles[src][1])
                    ensuredir(path.dirname(dest))
                    copyfile(path.join(self.srcdir, src), dest)
                except OSError as err:
                    logger.warning(__('cannot copy downloadable file %r: %s'),
                                   path.join(self.srcdir, src), err)
def create_pygments_style_file(self) -> None:
"""create a style file for pygments."""
with open(path.join(self.outdir, '_static', 'pygments.css'), 'w') as f:
f.write(self.highlighter.get_stylesheet())
if self.dark_highlighter:
with open(path.join(self.outdir, '_static', 'pygments_dark.css'), 'w') as f:
f.write(self.dark_highlighter.get_stylesheet())
def copy_translation_js(self) -> None:
"""Copy a JavaScript file for translations."""
jsfile = self._get_translations_js()
if jsfile:
copyfile(jsfile, path.join(self.outdir, '_static', 'translations.js'))
    def copy_stemmer_js(self) -> None:
        """Copy a JavaScript file for stemmer.

        Indexers may expose several raw-code files (``get_js_stemmer_rawcodes``)
        or a single one (``get_js_stemmer_rawcode``).
        """
        if self.indexer is not None:
            if hasattr(self.indexer, 'get_js_stemmer_rawcodes'):
                for jsfile in self.indexer.get_js_stemmer_rawcodes():
                    copyfile(jsfile, path.join(self.outdir, '_static', path.basename(jsfile)))
            else:
                jsfile = self.indexer.get_js_stemmer_rawcode()
                if jsfile:
                    copyfile(jsfile, path.join(self.outdir, '_static', '_stemmer.js'))
    def copy_theme_static_files(self, context: Dict) -> None:
        """Copy static files of the theme (and its ancestors) into ``_static``."""
        def onerror(filename: str, error: Exception) -> None:
            logger.warning(__('Failed to copy a file in html_static_file: %s: %r'),
                           filename, error)
        if self.theme:
            # reverse order: entries copied later overwrite those copied earlier
            for entry in self.theme.get_theme_dirs()[::-1]:
                copy_asset(path.join(entry, 'static'),
                           path.join(self.outdir, '_static'),
                           excluded=DOTFILES, context=context,
                           renderer=self.templates, onerror=onerror)
    def copy_html_static_files(self, context: Dict) -> None:
        """Copy the user's ``html_static_path`` entries into ``_static``."""
        def onerror(filename: str, error: Exception) -> None:
            logger.warning(__('Failed to copy a file in html_static_file: %s: %r'),
                           filename, error)
        # dotfiles and files matching exclude_patterns are skipped
        excluded = Matcher(self.config.exclude_patterns + ["**/.*"])
        for entry in self.config.html_static_path:
            copy_asset(path.join(self.confdir, entry),
                       path.join(self.outdir, '_static'),
                       excluded, context=context, renderer=self.templates, onerror=onerror)
def copy_html_logo(self) -> None:
if self.config.html_logo and not isurl(self.config.html_logo):
copy_asset(path.join(self.confdir, self.config.html_logo),
path.join(self.outdir, '_static'))
def copy_html_favicon(self) -> None:
if self.config.html_favicon and not isurl(self.config.html_favicon):
copy_asset(path.join(self.confdir, self.config.html_favicon),
path.join(self.outdir, '_static'))
    def copy_static_files(self) -> None:
        """Copy all static assets (pygments CSS, translations, theme, user files)."""
        try:
            with progress_message(__('copying static files')):
                ensuredir(path.join(self.outdir, '_static'))
                # prepare context for templates
                context = self.globalcontext.copy()
                if self.indexer is not None:
                    context.update(self.indexer.context_for_searchtool())
                self.create_pygments_style_file()
                self.copy_translation_js()
                self.copy_stemmer_js()
                self.copy_theme_static_files(context)
                self.copy_html_static_files(context)
                self.copy_html_logo()
                self.copy_html_favicon()
        except OSError as err:
            logger.warning(__('cannot copy static file %r'), err)
    def copy_extra_files(self) -> None:
        """copy html_extra_path files."""
        try:
            with progress_message(__('copying extra files')):
                excluded = Matcher(self.config.exclude_patterns)
                for extra_path in self.config.html_extra_path:
                    entry = path.join(self.confdir, extra_path)
                    # extra files land directly in the output root
                    copy_asset(entry, self.outdir, excluded)
        except OSError as err:
            logger.warning(__('cannot copy extra file %r'), err)
def write_buildinfo(self) -> None:
try:
with open(path.join(self.outdir, '.buildinfo'), 'w') as fp:
self.build_info.dump(fp)
except OSError as exc:
logger.warning(__('Failed to write build info file: %r'), exc)
def cleanup(self) -> None:
# clean up theme stuff
if self.theme:
self.theme.cleanup()
    def post_process_images(self, doctree: Node) -> None:
        """Pick the best candidate for an image and link down-scaled images to
        their high res version.
        """
        Builder.post_process_images(self, doctree)
        if self.config.html_scaled_image_link and self.html_scaled_image_link:
            for node in doctree.findall(nodes.image):
                if not any((key in node) for key in ['scale', 'width', 'height']):
                    # resizing options are not given. scaled image link is available
                    # only for resized images.
                    continue
                elif isinstance(node.parent, nodes.reference):
                    # A image having hyperlink target
                    continue
                elif 'no-scaled-link' in node['classes']:
                    # scaled image link is disabled for this node
                    continue
                uri = node['uri']
                reference = nodes.reference('', '', internal=True)
                if uri in self.images:
                    reference['refuri'] = posixpath.join(self.imgpath,
                                                         self.images[uri])
                else:
                    reference['refuri'] = uri
                # wrap the image node in the newly created reference
                node.replace_self(reference)
                reference.append(node)
    def load_indexer(self, docnames: Iterable[str]) -> None:
        """Load an existing search index, pruning entries for *docnames*.

        *docnames* are the documents about to be rebuilt; their old index
        entries are removed so they can be re-fed.
        """
        keep = set(self.env.all_docs) - set(docnames)
        try:
            searchindexfn = path.join(self.outdir, self.searchindex_filename)
            if self.indexer_dumps_unicode:
                with open(searchindexfn, encoding='utf-8') as ft:
                    self.indexer.load(ft, self.indexer_format)
            else:
                with open(searchindexfn, 'rb') as fb:
                    self.indexer.load(fb, self.indexer_format)
        except (OSError, ValueError):
            if keep:
                logger.warning(__('search index couldn\'t be loaded, but not all '
                                  'documents will be built: the index will be '
                                  'incomplete.'))
        # delete all entries for files that will be rebuilt
        self.indexer.prune(keep)
def index_page(self, pagename: str, doctree: nodes.document, title: str) -> None:
# only index pages with title
if self.indexer is not None and title:
filename = self.env.doc2path(pagename, base=None)
metadata = self.env.metadata.get(pagename, {})
if 'nosearch' in metadata:
self.indexer.feed(pagename, filename, '', new_document(''))
else:
self.indexer.feed(pagename, filename, title, doctree)
def _get_local_toctree(self, docname: str, collapse: bool = True, **kwargs: Any) -> str:
if 'includehidden' not in kwargs:
kwargs['includehidden'] = False
if kwargs.get('maxdepth') == '':
kwargs.pop('maxdepth')
return self.render_partial(TocTree(self.env).get_toctree_for(
docname, self, collapse, **kwargs))['fragment']
def get_outfilename(self, pagename: str) -> str:
return path.join(self.outdir, os_path(pagename) + self.out_suffix)
    def add_sidebars(self, pagename: str, ctx: Dict) -> None:
        """Resolve the sidebar list for *pagename* into *ctx*.

        Theme defaults are computed first, then overridden by the user's
        ``html_sidebars`` patterns; a wildcard-free pattern beats a wildcard
        pattern.
        """
        def has_wildcard(pattern: str) -> bool:
            return any(char in pattern for char in '*?[')
        sidebars = None
        matched = None
        customsidebar = None
        # default sidebars settings for selected theme
        if self.theme.name == 'alabaster':
            # provide default settings for alabaster (for compatibility)
            # Note: this will be removed before Sphinx-2.0
            try:
                # get default sidebars settings from alabaster (if defined)
                theme_default_sidebars = self.theme.config.get('theme', 'sidebars')
                if theme_default_sidebars:
                    sidebars = [name.strip() for name in theme_default_sidebars.split(',')]
            except Exception:
                # fallback to better default settings
                sidebars = ['about.html', 'navigation.html', 'relations.html',
                            'searchbox.html', 'donate.html']
        else:
            theme_default_sidebars = self.theme.get_config('theme', 'sidebars', None)
            if theme_default_sidebars:
                sidebars = [name.strip() for name in theme_default_sidebars.split(',')]
        # user sidebar settings
        html_sidebars = self.get_builder_config('sidebars', 'html')
        for pattern, patsidebars in html_sidebars.items():
            if patmatch(pagename, pattern):
                if matched:
                    if has_wildcard(pattern):
                        # warn if both patterns contain wildcards
                        if has_wildcard(matched):
                            logger.warning(__('page %s matches two patterns in '
                                              'html_sidebars: %r and %r'),
                                           pagename, matched, pattern)
                        # else the already matched pattern is more specific
                        # than the present one, because it contains no wildcard
                        continue
                matched = pattern
                sidebars = patsidebars
        if sidebars is None:
            # keep defaults
            pass
        ctx['sidebars'] = sidebars
        ctx['customsidebar'] = customsidebar
# --------- these are overwritten by the serialization builder
def get_target_uri(self, docname: str, typ: str = None) -> str:
return quote(docname) + self.link_suffix
    def handle_page(self, pagename: str, addctx: Dict, templatename: str = 'page.html',
                    outfilename: str = None, event_arg: Any = None) -> None:
        """Render *templatename* for *pagename* and write the result to disk.

        Builds the template context from the global context, page-specific
        additions (*addctx*) and per-page helpers (``pathto``, ``hasdoc``,
        ``toctree``), emits the ``html-page-context`` event (which may swap
        the template), renders, writes the output file, and copies the
        source file when ``html_copy_source`` is enabled.
        """
        ctx = self.globalcontext.copy()
        # current_page_name is backwards compatibility
        ctx['pagename'] = ctx['current_page_name'] = pagename
        ctx['encoding'] = self.config.html_output_encoding
        default_baseuri = self.get_target_uri(pagename)
        # in the singlehtml builder, default_baseuri still contains an #anchor
        # part, which relative_uri doesn't really like...
        default_baseuri = default_baseuri.rsplit('#', 1)[0]

        if self.config.html_baseurl:
            ctx['pageurl'] = posixpath.join(self.config.html_baseurl,
                                            pagename + self.out_suffix)
        else:
            ctx['pageurl'] = None

        def pathto(otheruri: str, resource: bool = False, baseuri: str = default_baseuri) -> str:  # NOQA
            """Template helper: URI of *otheruri* relative to the current page."""
            if resource and '://' in otheruri:
                # allow non-local resources given by scheme
                return otheruri
            elif not resource:
                otheruri = self.get_target_uri(otheruri)
            uri = relative_uri(baseuri, otheruri) or '#'
            if uri == '#' and not self.allow_sharp_as_current_path:
                uri = baseuri
            return uri
        ctx['pathto'] = pathto

        def hasdoc(name: str) -> bool:
            """Template helper: whether *name* is a known document or builtin page."""
            if name in self.env.all_docs:
                return True
            elif name == 'search' and self.search:
                return True
            elif name == 'genindex' and self.get_builder_config('use_index', 'html'):
                return True
            return False
        ctx['hasdoc'] = hasdoc

        ctx['toctree'] = lambda **kwargs: self._get_local_toctree(pagename, **kwargs)
        self.add_sidebars(pagename, ctx)
        ctx.update(addctx)

        # revert script_files and css_files
        self.script_files[:] = self._script_files
        self.css_files[:] = self._css_files

        self.update_page_context(pagename, templatename, ctx, event_arg)
        newtmpl = self.app.emit_firstresult('html-page-context', pagename,
                                            templatename, ctx, event_arg)
        if newtmpl:
            templatename = newtmpl

        # sort JS/CSS before rendering HTML
        try:
            # Convert script_files to list to support non-list script_files (refs: #8889)
            ctx['script_files'] = sorted(list(ctx['script_files']), key=lambda js: js.priority)
        except AttributeError:
            # Skip sorting if users modifies script_files directly (maybe via `html_context`).
            # refs: #8885
            #
            # Note: priority sorting feature will not work in this case.
            pass

        try:
            ctx['css_files'] = sorted(list(ctx['css_files']), key=lambda css: css.priority)
        except AttributeError:
            pass

        try:
            output = self.templates.render(templatename, ctx)
        except UnicodeError:
            logger.warning(__("a Unicode error occurred when rendering the page %s. "
                              "Please make sure all config values that contain "
                              "non-ASCII content are Unicode strings."), pagename)
            return
        except Exception as exc:
            raise ThemeError(__("An error happened in rendering the page %s.\nReason: %r") %
                             (pagename, exc)) from exc

        if not outfilename:
            outfilename = self.get_outfilename(pagename)
        # outfilename's path is in general different from self.outdir
        ensuredir(path.dirname(outfilename))
        try:
            with open(outfilename, 'w', encoding=ctx['encoding'],
                      errors='xmlcharrefreplace') as f:
                f.write(output)
        except OSError as err:
            logger.warning(__("error writing file %s: %s"), outfilename, err)
        if self.copysource and ctx.get('sourcename'):
            # copy the source file for the "show source" link
            source_name = path.join(self.outdir, '_sources',
                                    os_path(ctx['sourcename']))
            ensuredir(path.dirname(source_name))
            copyfile(self.env.doc2path(pagename), source_name)
    def update_page_context(self, pagename: str, templatename: str,
                            ctx: Dict, event_arg: Any) -> None:
        """Hook for subclasses to post-process the page context.

        Called just before the ``html-page-context`` event is emitted;
        the base implementation does nothing.
        """
        pass
def handle_finish(self) -> None:
if self.indexer:
self.finish_tasks.add_task(self.dump_search_index)
self.finish_tasks.add_task(self.dump_inventory)
    @progress_message(__('dumping object inventory'))
    def dump_inventory(self) -> None:
        """Write the object inventory (``objects.inv``) for intersphinx consumers."""
        InventoryFile.dump(path.join(self.outdir, INVENTORY_FILENAME), self.env, self)
    def dump_search_index(self) -> None:
        """Prune and serialize the search index to the output directory."""
        with progress_message(__('dumping search index in %s') % self.indexer.label()):
            self.indexer.prune(self.env.all_docs)
            searchindexfn = path.join(self.outdir, self.searchindex_filename)
            # first write to a temporary file, so that if dumping fails,
            # the existing index won't be overwritten
            if self.indexer_dumps_unicode:
                with open(searchindexfn + '.tmp', 'w', encoding='utf-8') as ft:
                    self.indexer.dump(ft, self.indexer_format)
            else:
                # binary index formats (e.g. pickle-based) need a bytes stream
                with open(searchindexfn + '.tmp', 'wb') as fb:
                    self.indexer.dump(fb, self.indexer_format)
            os.replace(searchindexfn + '.tmp', searchindexfn)
def convert_html_css_files(app: Sphinx, config: Config) -> None:
    """This converts string styled html_css_files to tuple styled one."""
    converted: List[Tuple[str, Dict]] = []
    for entry in config.html_css_files:
        if isinstance(entry, str):
            converted.append((entry, {}))
            continue
        try:
            filename, attrs = entry
        except Exception:
            # neither a string nor a (filename, attributes) pair
            logger.warning(__('invalid css_file: %r, ignored'), entry)
            continue
        converted.append((filename, attrs))

    config.html_css_files = converted  # type: ignore
def convert_html_js_files(app: Sphinx, config: Config) -> None:
    """This converts string styled html_js_files to tuple styled one."""
    converted: List[Tuple[str, Dict]] = []
    for entry in config.html_js_files:
        if isinstance(entry, str):
            converted.append((entry, {}))
            continue
        try:
            filename, attrs = entry
        except Exception:
            # neither a string nor a (filename, attributes) pair
            logger.warning(__('invalid js_file: %r, ignored'), entry)
            continue
        converted.append((filename, attrs))

    config.html_js_files = converted  # type: ignore
def setup_css_tag_helper(app: Sphinx, pagename: str, templatename: str,
                         context: Dict, doctree: Node) -> None:
    """Set up css_tag() template helper.

    .. note:: This set up function is added to keep compatibility with webhelper.
    """
    pathto = context.get('pathto')

    def css_tag(css: Stylesheet) -> str:
        """Render one stylesheet entry as a ``<link>`` tag."""
        parts = ['%s="%s"' % (key, html.escape(css.attributes[key], True))
                 for key in sorted(css.attributes)
                 if css.attributes[key] is not None]
        parts.append('href="%s"' % pathto(css.filename, resource=True))
        return '<link %s />' % ' '.join(parts)

    context['css_tag'] = css_tag
def setup_js_tag_helper(app: Sphinx, pagename: str, templatename: str,
                        context: Dict, doctree: Node) -> None:
    """Set up js_tag() template helper.

    .. note:: This set up function is added to keep compatibility with webhelper.
    """
    pathto = context.get('pathto')

    def js_tag(js: JavaScript) -> str:
        """Render one script entry as a ``<script>`` tag."""
        attrs = []
        body = ''
        if isinstance(js, JavaScript):
            for key in sorted(js.attributes):
                value = js.attributes[key]
                if value is not None:
                    if key == 'body':
                        # inline script content goes between the tags
                        body = value
                    elif key == 'data_url_root':
                        # resolved per page: relative path back to the doc root
                        attrs.append('data-url_root="%s"' % pathto('', resource=True))
                    else:
                        attrs.append('%s="%s"' % (key, html.escape(value, True)))
            if js.filename:
                attrs.append('src="%s"' % pathto(js.filename, resource=True))
        else:
            # str value (old styled)
            attrs.append('src="%s"' % pathto(js, resource=True))

        if attrs:
            return '<script %s>%s</script>' % (' '.join(attrs), body)
        else:
            return '<script>%s</script>' % body

    context['js_tag'] = js_tag
def setup_resource_paths(app: Sphinx, pagename: str, templatename: str,
                         context: Dict, doctree: Node) -> None:
    """Set up relative resource paths."""
    pathto = context.get('pathto')

    def _resolve(value: str) -> str:
        # non-URL resources are copied into _static; URLs pass through as-is
        if value and not isurl(value):
            return pathto('_static/' + value, resource=True)
        return value

    context['favicon_url'] = _resolve(context.get('favicon'))
    context['logo_url'] = _resolve(context.get('logo'))
def validate_math_renderer(app: Sphinx) -> None:
    """Ensure that exactly one registered math renderer is selected."""
    if app.builder.format != 'html':
        return

    name = app.builder.math_renderer_name  # type: ignore
    if name is None:
        raise ConfigError(__('Many math_renderers are registered. '
                             'But no math_renderer is selected.'))
    if name not in app.registry.html_inline_math_renderers:
        raise ConfigError(__('Unknown math_renderer %r is given.') % name)
def validate_html_extra_path(app: Sphinx, config: Config) -> None:
    """Check html_extra_paths setting."""
    # iterate over a copy so entries can be removed while looping
    for entry in list(config.html_extra_path):
        resolved = path.normpath(path.join(app.confdir, entry))
        same_drive = path.splitdrive(app.outdir)[0] == path.splitdrive(resolved)[0]
        if not path.exists(resolved):
            logger.warning(__('html_extra_path entry %r does not exist'), entry)
            config.html_extra_path.remove(entry)
        elif same_drive and path.commonpath([app.outdir, resolved]) == app.outdir:
            logger.warning(__('html_extra_path entry %r is placed inside outdir'), entry)
            config.html_extra_path.remove(entry)
def validate_html_static_path(app: Sphinx, config: Config) -> None:
    """Check html_static_paths setting."""
    # iterate over a copy so entries can be removed while looping
    for entry in list(config.html_static_path):
        resolved = path.normpath(path.join(app.confdir, entry))
        same_drive = path.splitdrive(app.outdir)[0] == path.splitdrive(resolved)[0]
        if not path.exists(resolved):
            logger.warning(__('html_static_path entry %r does not exist'), entry)
            config.html_static_path.remove(entry)
        elif same_drive and path.commonpath([app.outdir, resolved]) == app.outdir:
            logger.warning(__('html_static_path entry %r is placed inside outdir'), entry)
            config.html_static_path.remove(entry)
def validate_html_logo(app: Sphinx, config: Config) -> None:
    """Check html_logo setting."""
    logo = config.html_logo
    if not logo:
        return
    if isurl(logo) or path.isfile(path.join(app.confdir, logo)):
        return
    logger.warning(__('logo file %r does not exist'), logo)
    config.html_logo = None  # type: ignore
def validate_html_favicon(app: Sphinx, config: Config) -> None:
    """Check html_favicon setting."""
    favicon = config.html_favicon
    if not favicon:
        return
    if isurl(favicon) or path.isfile(path.join(app.confdir, favicon)):
        return
    logger.warning(__('favicon file %r does not exist'), favicon)
    config.html_favicon = None  # type: ignore
class _stable_repr_object():
def __repr__(self):
return '<object>'
UNSET = _stable_repr_object()
def migrate_html_add_permalinks(app: Sphinx, config: Config) -> None:
    """Migrate html_add_permalinks to html_permalinks*."""
    setting = config.html_add_permalinks
    if setting is UNSET:
        # the deprecated option was not used; nothing to migrate
        return

    # RemovedInSphinx60Warning
    logger.warning(__('html_add_permalinks has been deprecated since v3.5.0. '
                      'Please use html_permalinks and html_permalinks_icon instead.'))
    if not setting:
        config.html_permalinks = False  # type: ignore[attr-defined]
        return

    config.html_permalinks_icon = html.escape(setting)  # type: ignore[attr-defined]
# for compatibility
import sphinxcontrib.serializinghtml # NOQA
import sphinx.builders.dirhtml # NOQA
import sphinx.builders.singlehtml # NOQA
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the HTML builder, its config values, events and handlers."""
    # builders
    app.add_builder(StandaloneHTMLBuilder)

    # config values
    app.add_config_value('html_theme', 'alabaster', 'html')
    app.add_config_value('html_theme_path', [], 'html')
    app.add_config_value('html_theme_options', {}, 'html')
    app.add_config_value('html_title',
                         lambda self: _('%s %s documentation') % (self.project, self.release),
                         'html', [str])
    app.add_config_value('html_short_title', lambda self: self.html_title, 'html')
    app.add_config_value('html_style', None, 'html', [str])
    app.add_config_value('html_logo', None, 'html', [str])
    app.add_config_value('html_favicon', None, 'html', [str])
    app.add_config_value('html_css_files', [], 'html')
    app.add_config_value('html_js_files', [], 'html')
    app.add_config_value('html_static_path', [], 'html')
    app.add_config_value('html_extra_path', [], 'html')
    app.add_config_value('html_last_updated_fmt', None, 'html', [str])
    app.add_config_value('html_sidebars', {}, 'html')
    app.add_config_value('html_additional_pages', {}, 'html')
    app.add_config_value('html_domain_indices', True, 'html', [list])
    app.add_config_value('html_add_permalinks', UNSET, 'html')
    app.add_config_value('html_permalinks', True, 'html')
    app.add_config_value('html_permalinks_icon', '¶', 'html')
    app.add_config_value('html_use_index', True, 'html')
    app.add_config_value('html_split_index', False, 'html')
    app.add_config_value('html_copy_source', True, 'html')
    app.add_config_value('html_show_sourcelink', True, 'html')
    app.add_config_value('html_sourcelink_suffix', '.txt', 'html')
    app.add_config_value('html_use_opensearch', '', 'html')
    app.add_config_value('html_file_suffix', None, 'html', [str])
    app.add_config_value('html_link_suffix', None, 'html', [str])
    app.add_config_value('html_show_copyright', True, 'html')
    app.add_config_value('html_show_search_summary', True, 'html')
    app.add_config_value('html_show_sphinx', True, 'html')
    app.add_config_value('html_context', {}, 'html')
    app.add_config_value('html_output_encoding', 'utf-8', 'html')
    app.add_config_value('html_compact_lists', True, 'html')
    app.add_config_value('html_secnumber_suffix', '. ', 'html')
    app.add_config_value('html_search_language', None, 'html', [str])
    app.add_config_value('html_search_options', {}, 'html')
    app.add_config_value('html_search_scorer', '', None)
    app.add_config_value('html_scaled_image_link', True, 'html')
    app.add_config_value('html_baseurl', '', 'html')
    app.add_config_value('html_codeblock_linenos_style', 'inline', 'html',  # RemovedInSphinx60Warning # NOQA
                         ENUM('table', 'inline'))
    app.add_config_value('html_math_renderer', None, 'env')
    app.add_config_value('html4_writer', False, 'html')

    # events
    app.add_event('html-collect-pages')
    app.add_event('html-page-context')

    # event handlers (priority=800 so extensions connected with default
    # priority run before these normalizers/validators)
    app.connect('config-inited', convert_html_css_files, priority=800)
    app.connect('config-inited', convert_html_js_files, priority=800)
    app.connect('config-inited', migrate_html_add_permalinks, priority=800)
    app.connect('config-inited', validate_html_extra_path, priority=800)
    app.connect('config-inited', validate_html_static_path, priority=800)
    app.connect('config-inited', validate_html_logo, priority=800)
    app.connect('config-inited', validate_html_favicon, priority=800)
    app.connect('builder-inited', validate_math_renderer)
    app.connect('html-page-context', setup_css_tag_helper)
    app.connect('html-page-context', setup_js_tag_helper)
    app.connect('html-page-context', setup_resource_paths)

    # load default math renderer
    app.setup_extension('sphinx.ext.mathjax')

    # load transforms for HTML builder
    app.setup_extension('sphinx.builders.html.transforms')

    return {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
| 41.707405 | 116 | 0.602965 |
import html
import os
import posixpath
import re
import sys
from datetime import datetime
from os import path
from typing import IO, Any, Dict, Iterable, Iterator, List, Optional, Set, Tuple, Type
from urllib.parse import quote
from docutils import nodes
from docutils.core import publish_parts
from docutils.frontend import OptionParser
from docutils.io import DocTreeInput, StringOutput
from docutils.nodes import Node
from docutils.utils import relative_path
from sphinx import __display_version__, package_dir
from sphinx import version_info as sphinx_version
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.config import ENUM, Config
from sphinx.domains import Domain, Index, IndexEntry
from sphinx.environment.adapters.asset import ImageAdapter
from sphinx.environment.adapters.indexentries import IndexEntries
from sphinx.environment.adapters.toctree import TocTree
from sphinx.errors import ConfigError, ThemeError
from sphinx.highlighting import PygmentsBridge
from sphinx.locale import _, __
from sphinx.search import js_index
from sphinx.theming import HTMLThemeFactory
from sphinx.util import isurl, logging, md5, progress_message, status_iterator
from sphinx.util.docutils import is_html5_writer_available, new_document
from sphinx.util.fileutil import copy_asset
from sphinx.util.i18n import format_date
from sphinx.util.inventory import InventoryFile
from sphinx.util.matching import DOTFILES, Matcher, patmatch
from sphinx.util.osutil import copyfile, ensuredir, os_path, relative_uri
from sphinx.util.tags import Tags
from sphinx.writers.html import HTMLTranslator, HTMLWriter
# html5_ready: whether the HTML5 translator can be used with this docutils
if is_html5_writer_available():
    from sphinx.writers.html5 import HTML5Translator
    html5_ready = True
else:
    html5_ready = False

#: filename of the object inventory written into the output directory
INVENTORY_FILENAME = 'objects.inv'

logger = logging.getLogger(__name__)
# matches CR/LF runs; used to strip line breaks out of the release string
return_codes_re = re.compile('[\r\n]+')
def get_stable_hash(obj: Any) -> str:
    """Return a stable md5 hex digest for *obj*.

    Dicts hash independently of insertion order, and lists/tuples hash
    independently of element order (element digests are sorted first), so
    the digest is comparable across builds.
    """
    if isinstance(obj, dict):
        return get_stable_hash(list(obj.items()))
    if isinstance(obj, (list, tuple)):
        obj = sorted(get_stable_hash(item) for item in obj)
    return md5(str(obj).encode()).hexdigest()
def convert_locale_to_language_tag(locale: Optional[str]) -> Optional[str]:
    """Convert a locale string (``en_US``) to a language tag (``en-US``).

    Falsy values (``None``, ``''``) are normalized to ``None``.
    """
    if not locale:
        return None
    return locale.replace('_', '-')
class Stylesheet(str):
    """Metadata for a stylesheet.

    Instances behave as their filename (a ``str``) to keep compatibility
    with old themes that treat entries as plain strings.
    """

    # extra HTML attributes for the <link> tag (rel/type defaulted below)
    attributes: Dict[str, str] = None
    filename: str = None
    # load order: lower values are emitted earlier
    priority: int = None

    def __new__(cls, filename: str, *args: str, priority: int = 500, **attributes: Any
                ) -> "Stylesheet":
        self = str.__new__(cls, filename)
        self.filename = filename
        self.priority = priority
        self.attributes = attributes
        self.attributes.setdefault('rel', 'stylesheet')
        self.attributes.setdefault('type', 'text/css')
        if args:
            # old-style positional arguments: (rel[, title])
            self.attributes['rel'] = args[0]
            # bugfix: a single positional argument used to raise IndexError
            if len(args) > 1:
                self.attributes['title'] = args[1]
        return self
class JavaScript(str):
    """Metadata for a script file.

    Instances behave as their filename (a ``str``) to keep compatibility
    with old themes that treat entries as plain strings.
    """

    # extra HTML attributes for the <script> tag
    attributes: Dict[str, str] = None
    filename: str = None
    # load order: lower values are emitted earlier
    priority: int = None

    def __new__(cls, filename: str, priority: int = 500, **attributes: str) -> "JavaScript":
        obj = str.__new__(cls, filename)
        obj.filename = filename
        obj.priority = priority
        obj.attributes = attributes
        return obj
class BuildInfo:
    """Snapshot of the config/tags state a build was made with.

    Serialized to the ``.buildinfo`` file in the output directory; when it
    no longer matches the current configuration, a full rebuild is done.
    """

    @classmethod
    def load(cls, f: IO) -> "BuildInfo":
        """Parse a ``.buildinfo`` stream; raise ValueError if it is malformed."""
        try:
            lines = f.readlines()
            # validate explicitly instead of with ``assert``: assertions are
            # stripped under ``python -O`` and would silently accept broken files
            if lines[0].rstrip() != '# Sphinx build info version 1':
                raise ValueError('unknown or missing version header')
            if not lines[2].startswith('config: '):
                raise ValueError('missing "config:" line')
            if not lines[3].startswith('tags: '):
                raise ValueError('missing "tags:" line')

            build_info = BuildInfo()
            build_info.config_hash = lines[2].split()[1].strip()
            build_info.tags_hash = lines[3].split()[1].strip()
            return build_info
        except Exception as exc:
            raise ValueError(__('build info file is broken: %r') % exc) from exc

    def __init__(self, config: "Config" = None, tags: "Tags" = None,
                 config_categories: List[str] = []) -> None:
        # config_categories is never mutated, so the shared [] default is safe
        self.config_hash = ''
        self.tags_hash = ''

        if config:
            values = {c.name: c.value for c in config.filter(config_categories)}
            self.config_hash = get_stable_hash(values)

        if tags:
            self.tags_hash = get_stable_hash(sorted(tags))

    def __eq__(self, other: "BuildInfo") -> bool:
        return (self.config_hash == other.config_hash and
                self.tags_hash == other.tags_hash)

    def dump(self, f: IO) -> None:
        """Serialize this build info to *f* in the version-1 format."""
        f.write('# Sphinx build info version 1\n'
                '# This file hashes the configuration used when building these files.'
                ' When it is not found, a full rebuild will be done.\n'
                'config: %s\n'
                'tags: %s\n' %
                (self.config_hash, self.tags_hash))
class StandaloneHTMLBuilder(Builder):
    """
    Builds standalone HTML docs.
    """
    name = 'html'
    format = 'html'
    epilog = __('The HTML pages are in %(outdir)s.')

    copysource = True
    allow_parallel = True
    out_suffix = '.html'
    link_suffix = '.html'  # defaults to matching out_suffix
    indexer_format: Any = js_index
    indexer_dumps_unicode = True
    # create links to original images from images [True/False]
    html_scaled_image_link = True
    supported_image_types = ['image/svg+xml', 'image/png',
                             'image/gif', 'image/jpeg']
    supported_remote_images = True
    supported_data_uri_images = True
    searchindex_filename = 'searchindex.js'
    add_permalinks = True
    allow_sharp_as_current_path = True
    embedded = False  # for things like HTML help or Qt help: suppresses sidebar
    search = True  # for things like HTML help and Apple help: suppress search
    use_index = False
    download_support = True  # enable download role

    imgpath: str = None
    domain_indices: List[Tuple[str, Type[Index], List[Tuple[str, List[IndexEntry]]], bool]] = []

    def __init__(self, app: Sphinx) -> None:
        super().__init__(app)

        # CSS files
        self.css_files: List[Stylesheet] = []

        # JS files
        self.script_files: List[JavaScript] = []
    def init(self) -> None:
        """Initialize builder state: build info, templates, highlighter, assets."""
        self.build_info = self.create_build_info()
        # basename of images directory
        self.imagedir = '_images'
        # section numbers for headings in the currently visited document
        self.secnumbers: Dict[str, Tuple[int, ...]] = {}
        # currently written docname
        self.current_docname: str = None

        self.init_templates()
        self.init_highlighter()
        self.init_css_files()
        self.init_js_files()

        html_file_suffix = self.get_builder_config('file_suffix', 'html')
        if html_file_suffix is not None:
            self.out_suffix = html_file_suffix

        html_link_suffix = self.get_builder_config('link_suffix', 'html')
        if html_link_suffix is not None:
            self.link_suffix = html_link_suffix
        else:
            # links use the same suffix as the generated files
            self.link_suffix = self.out_suffix

        self.use_index = self.get_builder_config('use_index', 'html')
def create_build_info(self) -> BuildInfo:
return BuildInfo(self.config, self.tags, ['html'])
def _get_translations_js(self) -> str:
candidates = [path.join(dir, self.config.language,
'LC_MESSAGES', 'sphinx.js')
for dir in self.config.locale_dirs] + \
[path.join(package_dir, 'locale', self.config.language,
'LC_MESSAGES', 'sphinx.js'),
path.join(sys.prefix, 'share/sphinx/locale',
self.config.language, 'sphinx.js')]
for jsfile in candidates:
if path.isfile(jsfile):
return jsfile
return None
def _get_style_filename(self) -> str:
if self.config.html_style is not None:
return self.config.html_style
elif self.theme:
return self.theme.get_config('theme', 'stylesheet')
else:
return 'default.css'
def get_theme_config(self) -> Tuple[str, Dict]:
return self.config.html_theme, self.config.html_theme_options
def init_templates(self) -> None:
theme_factory = HTMLThemeFactory(self.app)
themename, themeoptions = self.get_theme_config()
self.theme = theme_factory.create(themename)
self.theme_options = themeoptions.copy()
self.create_template_bridge()
self.templates.init(self, self.theme)
    def init_highlighter(self) -> None:
        """Create the Pygments bridges for the light and (optional) dark styles."""
        # determine Pygments style: explicit config > theme setting > fallback
        if self.config.pygments_style is not None:
            style = self.config.pygments_style
        elif self.theme:
            style = self.theme.get_config('theme', 'pygments_style', 'none')
        else:
            style = 'sphinx'
        self.highlighter = PygmentsBridge('html', style)

        if self.theme:
            dark_style = self.theme.get_config('theme', 'pygments_dark_style', None)
        else:
            dark_style = None

        if dark_style is not None:
            self.dark_highlighter = PygmentsBridge('html', dark_style)
            # the dark stylesheet is applied only when the UA prefers dark mode
            self.app.add_css_file('pygments_dark.css',
                                  media='(prefers-color-scheme: dark)',
                                  id='pygments_dark_css')
        else:
            self.dark_highlighter = None
def init_css_files(self) -> None:
self.css_files = []
self.add_css_file('pygments.css', priority=200)
self.add_css_file(self._get_style_filename(), priority=200)
for filename, attrs in self.app.registry.css_files:
self.add_css_file(filename, **attrs)
for filename, attrs in self.get_builder_config('css_files', 'html'):
attrs.setdefault('priority', 800)
self.add_css_file(filename, **attrs)
def add_css_file(self, filename: str, **kwargs: Any) -> None:
if '://' not in filename:
filename = posixpath.join('_static', filename)
self.css_files.append(Stylesheet(filename, **kwargs))
    def init_js_files(self) -> None:
        """Register the builtin JavaScript files, then extension and user ones."""
        self.script_files = []
        self.add_js_file('documentation_options.js', id="documentation_options",
                         data_url_root='', priority=200)
        self.add_js_file('jquery.js', priority=200)
        self.add_js_file('underscore.js', priority=200)
        self.add_js_file('_sphinx_javascript_frameworks_compat.js', priority=200)
        self.add_js_file('doctools.js', priority=200)

        for filename, attrs in self.app.registry.js_files:
            self.add_js_file(filename, **attrs)

        for filename, attrs in self.get_builder_config('js_files', 'html'):
            # user scripts are loaded after extension ones
            attrs.setdefault('priority', 800)
            self.add_js_file(filename, **attrs)

        if self._get_translations_js():
            # UI translations exist for the configured language
            self.add_js_file('translations.js')
def add_js_file(self, filename: str, **kwargs: Any) -> None:
if filename and '://' not in filename:
filename = posixpath.join('_static', filename)
self.script_files.append(JavaScript(filename, **kwargs))
@property
def default_translator_class(self) -> Type[nodes.NodeVisitor]:
if not html5_ready or self.config.html4_writer:
return HTMLTranslator
else:
return HTML5Translator
@property
def math_renderer_name(self) -> str:
name = self.get_builder_config('math_renderer', 'html')
if name is not None:
return name
else:
renderers = list(self.app.registry.html_inline_math_renderers)
if len(renderers) == 1:
return renderers[0]
elif len(renderers) == 2:
renderers.remove('mathjax')
return renderers[0]
else:
return None
    def get_outdated_docs(self) -> Iterator[str]:
        """Yield docnames whose output is missing or older than their source.

        A changed build info (config/tags) forces a full rebuild; otherwise a
        document is outdated if its source or the newest template is newer
        than the written output file.
        """
        try:
            with open(path.join(self.outdir, '.buildinfo')) as fp:
                buildinfo = BuildInfo.load(fp)

            if self.build_info != buildinfo:
                logger.debug('[build target] did not match: build_info ')
                yield from self.env.found_docs
                return
        except ValueError as exc:
            logger.warning(__('Failed to read build info file: %r'), exc)
        except OSError:
            # ignore errors on reading
            pass

        if self.templates:
            template_mtime = self.templates.newest_template_mtime()
        else:
            template_mtime = 0
        for docname in self.env.found_docs:
            if docname not in self.env.all_docs:
                logger.debug('[build target] did not in env: %r', docname)
                yield docname
                continue
            targetname = self.get_outfilename(docname)
            try:
                targetmtime = path.getmtime(targetname)
            except Exception:
                # missing output file: treat as infinitely old
                targetmtime = 0
            try:
                srcmtime = max(path.getmtime(self.env.doc2path(docname)),
                               template_mtime)
                if srcmtime > targetmtime:
                    logger.debug(
                        '[build target] targetname %r(%s), template(%s), docname %r(%s)',
                        targetname,
                        datetime.utcfromtimestamp(targetmtime),
                        datetime.utcfromtimestamp(template_mtime),
                        docname,
                        datetime.utcfromtimestamp(path.getmtime(self.env.doc2path(docname))),
                    )
                    yield docname
            except OSError:
                # source doesn't exist anymore
                pass
def get_asset_paths(self) -> List[str]:
return self.config.html_extra_path + self.config.html_static_path
def render_partial(self, node: Node) -> Dict[str, str]:
if node is None:
return {'fragment': ''}
doc = new_document('<partial node>')
doc.append(node)
writer = HTMLWriter(self)
return publish_parts(reader_name='doctree',
writer=writer,
source_class=DocTreeInput,
settings_overrides={'output_encoding': 'unicode'},
source=doc)
    def prepare_writing(self, docnames: Set[str]) -> None:
        """Create writer/search indexer and assemble the global template context."""
        # create the search indexer
        self.indexer = None
        if self.search:
            from sphinx.search import IndexBuilder
            lang = self.config.html_search_language or self.config.language
            self.indexer = IndexBuilder(self.env, lang,
                                        self.config.html_search_options,
                                        self.config.html_search_scorer)
            self.load_indexer(docnames)

        self.docwriter = HTMLWriter(self)
        self.docsettings: Any = OptionParser(
            defaults=self.env.settings,
            components=(self.docwriter,),
            read_config_files=True).get_default_values()
        self.docsettings.compact_lists = bool(self.config.html_compact_lists)

        # determine the additional indices to include
        # (html_domain_indices may be a bool or a list of index names)
        self.domain_indices = []
        indices_config = self.config.html_domain_indices
        if indices_config:
            for domain_name in sorted(self.env.domains):
                domain: Domain = self.env.domains[domain_name]
                for indexcls in domain.indices:
                    indexname = '%s-%s' % (domain.name, indexcls.name)
                    if isinstance(indices_config, list):
                        if indexname not in indices_config:
                            continue
                    content, collapse = indexcls(domain).generate()
                    if content:
                        self.domain_indices.append(
                            (indexname, indexcls, content, collapse))

        lufmt = self.config.html_last_updated_fmt
        if lufmt is not None:
            self.last_updated = format_date(lufmt or _('%b %d, %Y'),
                                            language=self.config.language)
        else:
            self.last_updated = None

        # If the logo or favicon are urls, keep them as-is, otherwise
        # strip the relative path as the files will be copied into _static.
        logo = self.config.html_logo or ''
        favicon = self.config.html_favicon or ''

        if not isurl(logo):
            logo = path.basename(logo)
        if not isurl(favicon):
            favicon = path.basename(favicon)

        self.relations = self.env.collect_relations()

        rellinks: List[Tuple[str, str, str, str]] = []
        if self.use_index:
            rellinks.append(('genindex', _('General Index'), 'I', _('index')))
        for indexname, indexcls, _content, _collapse in self.domain_indices:
            # if it has a short name
            if indexcls.shortname:
                rellinks.append((indexname, indexcls.localname,
                                 '', indexcls.shortname))

        # back up script_files and css_files to allow adding JS/CSS files to a specific page.
        self._script_files = list(self.script_files)
        self._css_files = list(self.css_files)

        self.globalcontext = {
            'embedded': self.embedded,
            'project': self.config.project,
            'release': return_codes_re.sub('', self.config.release),
            'version': self.config.version,
            'last_updated': self.last_updated,
            'copyright': self.config.copyright,
            'master_doc': self.config.root_doc,
            'root_doc': self.config.root_doc,
            'use_opensearch': self.config.html_use_opensearch,
            'docstitle': self.config.html_title,
            'shorttitle': self.config.html_short_title,
            'show_copyright': self.config.html_show_copyright,
            'show_search_summary': self.config.html_show_search_summary,
            'show_sphinx': self.config.html_show_sphinx,
            'has_source': self.config.html_copy_source,
            'show_source': self.config.html_show_sourcelink,
            'sourcelink_suffix': self.config.html_sourcelink_suffix,
            'file_suffix': self.out_suffix,
            'link_suffix': self.link_suffix,
            'script_files': self.script_files,
            'language': convert_locale_to_language_tag(self.config.language),
            'css_files': self.css_files,
            'sphinx_version': __display_version__,
            'sphinx_version_tuple': sphinx_version,
            'style': self._get_style_filename(),
            'rellinks': rellinks,
            'builder': self.name,
            'parents': [],
            'logo': logo,
            'favicon': favicon,
            'html5_doctype': html5_ready and not self.config.html4_writer,
        }
        if self.theme:
            self.globalcontext.update(
                ('theme_' + key, val) for (key, val) in
                self.theme.get_options(self.theme_options).items())
        self.globalcontext.update(self.config.html_context)
    def get_doc_context(self, docname: str, body: str, metatags: str) -> Dict[str, Any]:
        """Collect items for the template context of a page."""
        # find out relations
        prev = next = None
        parents = []
        rellinks = self.globalcontext['rellinks'][:]
        related = self.relations.get(docname)
        titles = self.env.titles
        if related and related[2]:
            try:
                next = {
                    'link': self.get_relative_uri(docname, related[2]),
                    'title': self.render_partial(titles[related[2]])['title']
                }
                rellinks.append((related[2], next['title'], 'N', _('next')))
            except KeyError:
                next = None
        if related and related[1]:
            try:
                prev = {
                    'link': self.get_relative_uri(docname, related[1]),
                    'title': self.render_partial(titles[related[1]])['title']
                }
                rellinks.append((related[1], prev['title'], 'P', _('previous')))
            except KeyError:
                # the relation is (somehow) not in the TOC tree, handle
                # that gracefully
                prev = None
        while related and related[0]:
            try:
                parents.append(
                    {'link': self.get_relative_uri(docname, related[0]),
                     'title': self.render_partial(titles[related[0]])['title']})
            except KeyError:
                pass
            related = self.relations.get(related[0])
        if parents:
            # remove link to the master file; we have a generic
            # "back to index" link already
            parents.pop()
        parents.reverse()

        # title rendered as HTML
        title_node = self.env.longtitles.get(docname)
        title = self.render_partial(title_node)['title'] if title_node else ''

        # Suffix for the document
        source_suffix = self.env.doc2path(docname, False)[len(docname):]

        # the name for the copied source
        if self.config.html_copy_source:
            sourcename = docname + source_suffix
            if source_suffix != self.config.html_sourcelink_suffix:
                sourcename += self.config.html_sourcelink_suffix
        else:
            sourcename = ''

        # metadata for the document
        meta = self.env.metadata.get(docname)

        # local TOC and global TOC tree
        self_toc = TocTree(self.env).get_toc_for(docname, self)
        toc = self.render_partial(self_toc)['fragment']

        return {
            'parents': parents,
            'prev': prev,
            'next': next,
            'title': title,
            'meta': meta,
            'body': body,
            'metatags': metatags,
            'rellinks': rellinks,
            'sourcename': sourcename,
            'toc': toc,
            # only display a TOC if there's more than one item to show
            'display_toc': (self.env.toc_num_entries[docname] > 1),
            'page_source_suffix': source_suffix,
        }
    def write_doc(self, docname: str, doctree: nodes.document) -> None:
        """Render *doctree* to an HTML fragment and hand it to handle_page()."""
        destination = StringOutput(encoding='utf-8')
        doctree.settings = self.docsettings

        # per-document state consumed by the translator
        self.secnumbers = self.env.toc_secnumbers.get(docname, {})
        self.fignumbers = self.env.toc_fignumbers.get(docname, {})
        self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
        self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads')
        self.current_docname = docname
        self.docwriter.write(doctree, destination)
        self.docwriter.assemble_parts()
        body = self.docwriter.parts['fragment']
        metatags = self.docwriter.clean_meta

        ctx = self.get_doc_context(docname, body, metatags)
        self.handle_page(docname, ctx, event_arg=doctree)
def write_doc_serialized(self, docname: str, doctree: nodes.document) -> None:
self.imgpath = relative_uri(self.get_target_uri(docname), self.imagedir)
self.post_process_images(doctree)
title_node = self.env.longtitles.get(docname)
title = self.render_partial(title_node)['title'] if title_node else ''
self.index_page(docname, doctree, title)
def finish(self) -> None:
self.finish_tasks.add_task(self.gen_indices)
self.finish_tasks.add_task(self.gen_pages_from_extensions)
self.finish_tasks.add_task(self.gen_additional_pages)
self.finish_tasks.add_task(self.copy_image_files)
self.finish_tasks.add_task(self.copy_download_files)
self.finish_tasks.add_task(self.copy_static_files)
self.finish_tasks.add_task(self.copy_extra_files)
self.finish_tasks.add_task(self.write_buildinfo)
self.handle_finish()
    @progress_message(__('generating indices'))
    def gen_indices(self) -> None:
        """Write the general index (when enabled) and all domain indices."""
        # the global general index
        if self.use_index:
            self.write_genindex()

        # the global domain-specific indices
        self.write_domain_indices()
def gen_pages_from_extensions(self) -> None:
for pagelist in self.events.emit('html-collect-pages'):
for pagename, context, template in pagelist:
self.handle_page(pagename, context, template)
    @progress_message(__('writing additional pages'))
    def gen_additional_pages(self) -> None:
        """Write html_additional_pages, the search page and the opensearch XML."""
        # additional pages from conf.py
        for pagename, template in self.config.html_additional_pages.items():
            logger.info(pagename + ' ', nonl=True)
            self.handle_page(pagename, {}, template)

        # the search page
        if self.search:
            logger.info('search ', nonl=True)
            self.handle_page('search', {}, 'search.html')

        # the opensearch xml file
        if self.config.html_use_opensearch and self.search:
            logger.info('opensearch ', nonl=True)
            fn = path.join(self.outdir, '_static', 'opensearch.xml')
            self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
    def write_genindex(self) -> None:
        """Render the general index, either as one page or split per letter."""
        genindex = IndexEntries(self.env).create_index(self)
        # per-letter entry counts; passed to the index templates below
        indexcounts = []
        for _k, entries in genindex:
            indexcounts.append(sum(1 + len(subitems)
                                   for _, (_, subitems, _) in entries))
        genindexcontext = {
            'genindexentries': genindex,
            'genindexcounts': indexcounts,
            'split_index': self.config.html_split_index,
        }
        logger.info('genindex ', nonl=True)
        if self.config.html_split_index:
            # one page per letter plus a combined "all" page
            self.handle_page('genindex', genindexcontext,
                             'genindex-split.html')
            self.handle_page('genindex-all', genindexcontext,
                             'genindex.html')
            for (key, entries), count in zip(genindex, indexcounts):
                ctx = {'key': key, 'entries': entries, 'count': count,
                       'genindexentries': genindex}
                self.handle_page('genindex-' + key, ctx,
                                 'genindex-single.html')
        else:
            self.handle_page('genindex', genindexcontext, 'genindex.html')
def write_domain_indices(self) -> None:
for indexname, indexcls, content, collapse in self.domain_indices:
indexcontext = {
'indextitle': indexcls.localname,
'content': content,
'collapse_index': collapse,
}
logger.info(indexname + ' ', nonl=True)
self.handle_page(indexname, indexcontext, 'domainindex.html')
def copy_image_files(self) -> None:
if self.images:
stringify_func = ImageAdapter(self.app.env).get_original_image_uri
ensuredir(path.join(self.outdir, self.imagedir))
for src in status_iterator(self.images, __('copying images... '), "brown",
len(self.images), self.app.verbosity,
stringify_func=stringify_func):
dest = self.images[src]
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, self.imagedir, dest))
except Exception as err:
logger.warning(__('cannot copy image file %r: %s'),
path.join(self.srcdir, src), err)
def copy_download_files(self) -> None:
def to_relpath(f: str) -> str:
return relative_path(self.srcdir, f)
if self.env.dlfiles:
ensuredir(path.join(self.outdir, '_downloads'))
for src in status_iterator(self.env.dlfiles, __('copying downloadable files... '),
"brown", len(self.env.dlfiles), self.app.verbosity,
stringify_func=to_relpath):
try:
dest = path.join(self.outdir, '_downloads', self.env.dlfiles[src][1])
ensuredir(path.dirname(dest))
copyfile(path.join(self.srcdir, src), dest)
except OSError as err:
logger.warning(__('cannot copy downloadable file %r: %s'),
path.join(self.srcdir, src), err)
def create_pygments_style_file(self) -> None:
with open(path.join(self.outdir, '_static', 'pygments.css'), 'w') as f:
f.write(self.highlighter.get_stylesheet())
if self.dark_highlighter:
with open(path.join(self.outdir, '_static', 'pygments_dark.css'), 'w') as f:
f.write(self.dark_highlighter.get_stylesheet())
    def copy_translation_js(self) -> None:
        """Copy the JavaScript message catalog (if any) into ``_static``."""
        jsfile = self._get_translations_js()
        if jsfile:
            copyfile(jsfile, path.join(self.outdir, '_static', 'translations.js'))
def copy_stemmer_js(self) -> None:
if self.indexer is not None:
if hasattr(self.indexer, 'get_js_stemmer_rawcodes'):
for jsfile in self.indexer.get_js_stemmer_rawcodes():
copyfile(jsfile, path.join(self.outdir, '_static', path.basename(jsfile)))
else:
jsfile = self.indexer.get_js_stemmer_rawcode()
if jsfile:
copyfile(jsfile, path.join(self.outdir, '_static', '_stemmer.js'))
def copy_theme_static_files(self, context: Dict) -> None:
def onerror(filename: str, error: Exception) -> None:
logger.warning(__('Failed to copy a file in html_static_file: %s: %r'),
filename, error)
if self.theme:
for entry in self.theme.get_theme_dirs()[::-1]:
copy_asset(path.join(entry, 'static'),
path.join(self.outdir, '_static'),
excluded=DOTFILES, context=context,
renderer=self.templates, onerror=onerror)
def copy_html_static_files(self, context: Dict) -> None:
def onerror(filename: str, error: Exception) -> None:
logger.warning(__('Failed to copy a file in html_static_file: %s: %r'),
filename, error)
excluded = Matcher(self.config.exclude_patterns + ["**/.*"])
for entry in self.config.html_static_path:
copy_asset(path.join(self.confdir, entry),
path.join(self.outdir, '_static'),
excluded, context=context, renderer=self.templates, onerror=onerror)
def copy_html_logo(self) -> None:
if self.config.html_logo and not isurl(self.config.html_logo):
copy_asset(path.join(self.confdir, self.config.html_logo),
path.join(self.outdir, '_static'))
def copy_html_favicon(self) -> None:
if self.config.html_favicon and not isurl(self.config.html_favicon):
copy_asset(path.join(self.confdir, self.config.html_favicon),
path.join(self.outdir, '_static'))
def copy_static_files(self) -> None:
try:
with progress_message(__('copying static files')):
ensuredir(path.join(self.outdir, '_static'))
context = self.globalcontext.copy()
if self.indexer is not None:
context.update(self.indexer.context_for_searchtool())
self.create_pygments_style_file()
self.copy_translation_js()
self.copy_stemmer_js()
self.copy_theme_static_files(context)
self.copy_html_static_files(context)
self.copy_html_logo()
self.copy_html_favicon()
except OSError as err:
logger.warning(__('cannot copy static file %r'), err)
def copy_extra_files(self) -> None:
try:
with progress_message(__('copying extra files')):
excluded = Matcher(self.config.exclude_patterns)
for extra_path in self.config.html_extra_path:
entry = path.join(self.confdir, extra_path)
copy_asset(entry, self.outdir, excluded)
except OSError as err:
logger.warning(__('cannot copy extra file %r'), err)
def write_buildinfo(self) -> None:
try:
with open(path.join(self.outdir, '.buildinfo'), 'w') as fp:
self.build_info.dump(fp)
except OSError as exc:
logger.warning(__('Failed to write build info file: %r'), exc)
    def cleanup(self) -> None:
        """Delegate cleanup to the loaded theme, if any."""
        if self.theme:
            self.theme.cleanup()
    def post_process_images(self, doctree: Node) -> None:
        """Pick image candidates and wrap scaled images in a link to the original."""
        Builder.post_process_images(self, doctree)
        if self.config.html_scaled_image_link and self.html_scaled_image_link:
            for node in doctree.findall(nodes.image):
                if not any((key in node) for key in ['scale', 'width', 'height']):
                    # not resized -- no link needed
                    continue
                elif isinstance(node.parent, nodes.reference):
                    # image is already a hyperlink target
                    continue
                elif 'no-scaled-link' in node['classes']:
                    # linking explicitly disabled for this image
                    continue
                uri = node['uri']
                reference = nodes.reference('', '', internal=True)
                if uri in self.images:
                    reference['refuri'] = posixpath.join(self.imgpath,
                                                         self.images[uri])
                else:
                    reference['refuri'] = uri
                # replace the image node by reference > image
                node.replace_self(reference)
                reference.append(node)
    def load_indexer(self, docnames: Iterable[str]) -> None:
        """Load the existing search index, dropping entries for *docnames*.

        *docnames* are the documents about to be (re)built; their entries are
        pruned so they can be re-fed by :meth:`index_page`.
        """
        keep = set(self.env.all_docs) - set(docnames)
        try:
            searchindexfn = path.join(self.outdir, self.searchindex_filename)
            # the on-disk format depends on the indexer implementation
            if self.indexer_dumps_unicode:
                with open(searchindexfn, encoding='utf-8') as ft:
                    self.indexer.load(ft, self.indexer_format)
            else:
                with open(searchindexfn, 'rb') as fb:
                    self.indexer.load(fb, self.indexer_format)
        except (OSError, ValueError):
            if keep:
                logger.warning(__('search index couldn\'t be loaded, but not all '
                                  'documents will be built: the index will be '
                                  'incomplete.'))
        # delete all entries for files that will be rebuilt
        self.indexer.prune(keep)
def index_page(self, pagename: str, doctree: nodes.document, title: str) -> None:
# only index pages with title
if self.indexer is not None and title:
filename = self.env.doc2path(pagename, base=None)
metadata = self.env.metadata.get(pagename, {})
if 'nosearch' in metadata:
self.indexer.feed(pagename, filename, '', new_document(''))
else:
self.indexer.feed(pagename, filename, title, doctree)
def _get_local_toctree(self, docname: str, collapse: bool = True, **kwargs: Any) -> str:
if 'includehidden' not in kwargs:
kwargs['includehidden'] = False
if kwargs.get('maxdepth') == '':
kwargs.pop('maxdepth')
return self.render_partial(TocTree(self.env).get_toctree_for(
docname, self, collapse, **kwargs))['fragment']
    def get_outfilename(self, pagename: str) -> str:
        """Return the output file path for *pagename* (outdir + page + suffix)."""
        return path.join(self.outdir, os_path(pagename) + self.out_suffix)
    def add_sidebars(self, pagename: str, ctx: Dict) -> None:
        """Resolve sidebar templates for *pagename* into *ctx*.

        Theme defaults are computed first, then overridden by the most
        specific matching ``html_sidebars`` pattern (an exact pattern beats a
        wildcard one).
        """
        def has_wildcard(pattern: str) -> bool:
            return any(char in pattern for char in '*?[')
        sidebars = None
        matched = None
        customsidebar = None
        # default sidebars settings for selected theme
        if self.theme.name == 'alabaster':
            # provide default settings for alabaster (for compatibility)
            # Note: this will be removed before Sphinx-2.0
            try:
                # get default sidebars settings from alabaster (if defined)
                theme_default_sidebars = self.theme.config.get('theme', 'sidebars')
                if theme_default_sidebars:
                    sidebars = [name.strip() for name in theme_default_sidebars.split(',')]
            except Exception:
                # fallback to better default settings
                sidebars = ['about.html', 'navigation.html', 'relations.html',
                            'searchbox.html', 'donate.html']
        else:
            theme_default_sidebars = self.theme.get_config('theme', 'sidebars', None)
            if theme_default_sidebars:
                sidebars = [name.strip() for name in theme_default_sidebars.split(',')]
        # user sidebar settings
        html_sidebars = self.get_builder_config('sidebars', 'html')
        for pattern, patsidebars in html_sidebars.items():
            if patmatch(pagename, pattern):
                if matched:
                    if has_wildcard(pattern):
                        # warn if both patterns contain wildcards
                        if has_wildcard(matched):
                            logger.warning(__('page %s matches two patterns in '
                                              'html_sidebars: %r and %r'),
                                           pagename, matched, pattern)
                        # else the already matched pattern is more specific
                        # than the present one, because it contains no wildcard
                        continue
                matched = pattern
                sidebars = patsidebars
        if sidebars is None:
            # keep defaults
            pass
        ctx['sidebars'] = sidebars
        ctx['customsidebar'] = customsidebar
# --------- these are overwritten by the serialization builder
def get_target_uri(self, docname: str, typ: str = None) -> str:
return quote(docname) + self.link_suffix
    def handle_page(self, pagename: str, addctx: Dict, templatename: str = 'page.html',
                    outfilename: str = None, event_arg: Any = None) -> None:
        """Render one page through the template engine and write it to disk.

        *addctx* is merged over the global HTML context; *event_arg* is
        forwarded to the ``html-page-context`` event (typically the doctree).
        """
        ctx = self.globalcontext.copy()
        # current_page_name is backwards compatibility
        ctx['pagename'] = ctx['current_page_name'] = pagename
        ctx['encoding'] = self.config.html_output_encoding
        default_baseuri = self.get_target_uri(pagename)
        # in the singlehtml builder, default_baseuri still contains an #anchor
        # part, which relative_uri doesn't really like...
        default_baseuri = default_baseuri.rsplit('#', 1)[0]
        if self.config.html_baseurl:
            ctx['pageurl'] = posixpath.join(self.config.html_baseurl,
                                            pagename + self.out_suffix)
        else:
            ctx['pageurl'] = None
        def pathto(otheruri: str, resource: bool = False, baseuri: str = default_baseuri) -> str:
            # template helper: relative URI from this page to *otheruri*
            if resource and '://' in otheruri:
                # allow non-local resources given by scheme
                return otheruri
            elif not resource:
                otheruri = self.get_target_uri(otheruri)
            uri = relative_uri(baseuri, otheruri) or '#'
            if uri == '#' and not self.allow_sharp_as_current_path:
                uri = baseuri
            return uri
        ctx['pathto'] = pathto
        def hasdoc(name: str) -> bool:
            # template helper: does a document (or special page) exist?
            if name in self.env.all_docs:
                return True
            elif name == 'search' and self.search:
                return True
            elif name == 'genindex' and self.get_builder_config('use_index', 'html'):
                return True
            return False
        ctx['hasdoc'] = hasdoc
        ctx['toctree'] = lambda **kwargs: self._get_local_toctree(pagename, **kwargs)
        self.add_sidebars(pagename, ctx)
        ctx.update(addctx)
        # reset asset lists to the originals (self._script_files/_css_files)
        # so per-page additions do not leak into later pages
        self.script_files[:] = self._script_files
        self.css_files[:] = self._css_files
        self.update_page_context(pagename, templatename, ctx, event_arg)
        newtmpl = self.app.emit_firstresult('html-page-context', pagename,
                                            templatename, ctx, event_arg)
        if newtmpl:
            templatename = newtmpl
        # sort JS/CSS by priority; entries without a .priority attribute
        # (plain strings injected by extensions) keep their original order
        try:
            ctx['script_files'] = sorted(list(ctx['script_files']), key=lambda js: js.priority)
        except AttributeError:
            pass
        try:
            ctx['css_files'] = sorted(list(ctx['css_files']), key=lambda css: css.priority)
        except AttributeError:
            pass
        try:
            output = self.templates.render(templatename, ctx)
        except UnicodeError:
            logger.warning(__("a Unicode error occurred when rendering the page %s. "
                              "Please make sure all config values that contain "
                              "non-ASCII content are Unicode strings."), pagename)
            return
        except Exception as exc:
            raise ThemeError(__("An error happened in rendering the page %s.\nReason: %r") %
                             (pagename, exc)) from exc
        if not outfilename:
            outfilename = self.get_outfilename(pagename)
        # outfilename's path is in general different from self.outdir
        ensuredir(path.dirname(outfilename))
        try:
            with open(outfilename, 'w', encoding=ctx['encoding'],
                      errors='xmlcharrefreplace') as f:
                f.write(output)
        except OSError as err:
            logger.warning(__("error writing file %s: %s"), outfilename, err)
        if self.copysource and ctx.get('sourcename'):
            # copy the source file for the "show source" link
            source_name = path.join(self.outdir, '_sources',
                                    os_path(ctx['sourcename']))
            ensuredir(path.dirname(source_name))
            copyfile(self.env.doc2path(pagename), source_name)
    def update_page_context(self, pagename: str, templatename: str,
                            ctx: Dict, event_arg: Any) -> None:
        """Hook for subclasses to adjust the template context; default no-op."""
        pass
    def handle_finish(self) -> None:
        """Queue dumping of the search index and the object inventory."""
        if self.indexer:
            self.finish_tasks.add_task(self.dump_search_index)
        self.finish_tasks.add_task(self.dump_inventory)
    @progress_message(__('dumping object inventory'))
    def dump_inventory(self) -> None:
        """Write the object inventory file into the output directory."""
        InventoryFile.dump(path.join(self.outdir, INVENTORY_FILENAME), self.env, self)
    def dump_search_index(self) -> None:
        """Serialize the search index to disk, atomically via a temp file."""
        with progress_message(__('dumping search index in %s') % self.indexer.label()):
            self.indexer.prune(self.env.all_docs)
            searchindexfn = path.join(self.outdir, self.searchindex_filename)
            # first write to a temporary file, so that if dumping fails,
            # the existing index won't be overwritten
            if self.indexer_dumps_unicode:
                with open(searchindexfn + '.tmp', 'w', encoding='utf-8') as ft:
                    self.indexer.dump(ft, self.indexer_format)
            else:
                with open(searchindexfn + '.tmp', 'wb') as fb:
                    self.indexer.dump(fb, self.indexer_format)
            os.replace(searchindexfn + '.tmp', searchindexfn)
def convert_html_css_files(app: "Sphinx", config: "Config") -> None:
    """Normalize ``html_css_files`` entries to ``(filename, attributes)`` tuples."""
    normalized: List[Tuple[str, Dict]] = []
    for entry in config.html_css_files:
        if isinstance(entry, str):
            # bare filename: no extra attributes
            normalized.append((entry, {}))
            continue
        try:
            filename, attrs = entry
        except Exception:
            logger.warning(__('invalid css_file: %r, ignored'), entry)
        else:
            normalized.append((filename, attrs))
    config.html_css_files = normalized
def convert_html_js_files(app: "Sphinx", config: "Config") -> None:
    """Normalize ``html_js_files`` entries to ``(filename, attributes)`` tuples."""
    normalized: List[Tuple[str, Dict]] = []
    for entry in config.html_js_files:
        if isinstance(entry, str):
            # bare filename: no extra attributes
            normalized.append((entry, {}))
            continue
        try:
            filename, attrs = entry
        except Exception:
            logger.warning(__('invalid js_file: %r, ignored'), entry)
        else:
            normalized.append((filename, attrs))
    config.html_js_files = normalized
def setup_css_tag_helper(app: "Sphinx", pagename: str, templatename: str,
                         context: Dict, doctree: "Node") -> None:
    """Install a ``css_tag`` helper into the template *context*."""
    pathto = context.get('pathto')

    def css_tag(css):
        # attributes are rendered in deterministic (sorted) order, href last;
        # None-valued attributes are omitted
        parts = ['%s="%s"' % (key, html.escape(css.attributes[key], True))
                 for key in sorted(css.attributes)
                 if css.attributes[key] is not None]
        parts.append('href="%s"' % pathto(css.filename, resource=True))
        return '<link %s />' % ' '.join(parts)

    context['css_tag'] = css_tag
def setup_js_tag_helper(app: Sphinx, pagename: str, templatename: str,
                        context: Dict, doctree: Node) -> None:
    """Install a ``js_tag`` helper into the template *context*."""
    pathto = context.get('pathto')
    def js_tag(js: JavaScript) -> str:
        attrs = []
        body = ''
        if isinstance(js, JavaScript):
            for key in sorted(js.attributes):
                value = js.attributes[key]
                if value is not None:
                    if key == 'body':
                        # inline script content, not an HTML attribute
                        body = value
                    elif key == 'data_url_root':
                        # resolved relative to the current page
                        attrs.append('data-url_root="%s"' % pathto('', resource=True))
                    else:
                        attrs.append('%s="%s"' % (key, html.escape(value, True)))
            if js.filename:
                attrs.append('src="%s"' % pathto(js.filename, resource=True))
        else:
            # plain string entry: treated as the script's src
            attrs.append('src="%s"' % pathto(js, resource=True))
        if attrs:
            return '<script %s>%s</script>' % (' '.join(attrs), body)
        else:
            return '<script>%s</script>' % body
    context['js_tag'] = js_tag
def setup_resource_paths(app: "Sphinx", pagename: str, templatename: str,
                         context: Dict, doctree: "Node") -> None:
    """Set ``favicon_url``/``logo_url`` in *context*, resolving local files."""
    pathto = context.get('pathto')

    for key in ('favicon', 'logo'):
        value = context.get(key)
        if value and not isurl(value):
            # local files live under _static; URLs pass through unchanged
            value = pathto('_static/' + value, resource=True)
        context[key + '_url'] = value
def validate_math_renderer(app: "Sphinx") -> None:
    """Ensure a single, registered math renderer is selected for HTML output."""
    if app.builder.format != 'html':
        return
    name = app.builder.math_renderer_name
    if name is None:
        # several renderers registered, none chosen via html_math_renderer
        raise ConfigError(__('Many math_renderers are registered. '
                             'But no math_renderer is selected.'))
    if name not in app.registry.html_inline_math_renderers:
        raise ConfigError(__('Unknown math_renderer %r is given.') % name)
def validate_html_extra_path(app: "Sphinx", config: "Config") -> None:
    """Drop ``html_extra_path`` entries that are missing or inside outdir."""
    # iterate over a copy; entries are removed from the live list
    for entry in list(config.html_extra_path):
        extra_path = path.normpath(path.join(app.confdir, entry))
        if not path.exists(extra_path):
            logger.warning(__('html_extra_path entry %r does not exist'), entry)
            config.html_extra_path.remove(entry)
            continue
        # commonpath requires both paths on the same drive (Windows)
        same_drive = path.splitdrive(app.outdir)[0] == path.splitdrive(extra_path)[0]
        if same_drive and path.commonpath([app.outdir, extra_path]) == app.outdir:
            logger.warning(__('html_extra_path entry %r is placed inside outdir'), entry)
            config.html_extra_path.remove(entry)
def validate_html_static_path(app: "Sphinx", config: "Config") -> None:
    """Drop ``html_static_path`` entries that are missing or inside outdir."""
    # iterate over a copy; entries are removed from the live list
    for entry in list(config.html_static_path):
        static_path = path.normpath(path.join(app.confdir, entry))
        if not path.exists(static_path):
            logger.warning(__('html_static_path entry %r does not exist'), entry)
            config.html_static_path.remove(entry)
            continue
        # commonpath requires both paths on the same drive (Windows)
        same_drive = path.splitdrive(app.outdir)[0] == path.splitdrive(static_path)[0]
        if same_drive and path.commonpath([app.outdir, static_path]) == app.outdir:
            logger.warning(__('html_static_path entry %r is placed inside outdir'), entry)
            config.html_static_path.remove(entry)
def validate_html_logo(app: "Sphinx", config: "Config") -> None:
    """Reset ``html_logo`` when it points at a missing local file."""
    logo = config.html_logo
    if logo and not isurl(logo) and not path.isfile(path.join(app.confdir, logo)):
        logger.warning(__('logo file %r does not exist'), logo)
        config.html_logo = None
def validate_html_favicon(app: "Sphinx", config: "Config") -> None:
    """Reset ``html_favicon`` when it points at a missing local file."""
    favicon = config.html_favicon
    if favicon and not isurl(favicon) and not path.isfile(path.join(app.confdir, favicon)):
        logger.warning(__('favicon file %r does not exist'), favicon)
        config.html_favicon = None
class _stable_repr_object():
def __repr__(self):
return '<object>'
UNSET = _stable_repr_object()
def migrate_html_add_permalinks(app: Sphinx, config: Config) -> None:
    """Migrate the deprecated ``html_add_permalinks`` to its successors."""
    html_add_permalinks = config.html_add_permalinks
    if html_add_permalinks is UNSET:
        # not configured by the user -- nothing to migrate
        return
    logger.warning(__('html_add_permalinks has been deprecated since v3.5.0. '
                      'Please use html_permalinks and html_permalinks_icon instead.'))
    if not html_add_permalinks:
        # a falsy value used to disable permalinks entirely
        config.html_permalinks = False
        return
    config.html_permalinks_icon = html.escape(
        html_add_permalinks
    )
import sphinxcontrib.serializinghtml
import sphinx.builders.dirhtml
import sphinx.builders.singlehtml
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the HTML builder, its config values, events and handlers."""
    # builders
    app.add_builder(StandaloneHTMLBuilder)
    # config values
    app.add_config_value('html_theme', 'alabaster', 'html')
    app.add_config_value('html_theme_path', [], 'html')
    app.add_config_value('html_theme_options', {}, 'html')
    app.add_config_value('html_title',
                         lambda self: _('%s %s documentation') % (self.project, self.release),
                         'html', [str])
    app.add_config_value('html_short_title', lambda self: self.html_title, 'html')
    app.add_config_value('html_style', None, 'html', [str])
    app.add_config_value('html_logo', None, 'html', [str])
    app.add_config_value('html_favicon', None, 'html', [str])
    app.add_config_value('html_css_files', [], 'html')
    app.add_config_value('html_js_files', [], 'html')
    app.add_config_value('html_static_path', [], 'html')
    app.add_config_value('html_extra_path', [], 'html')
    app.add_config_value('html_last_updated_fmt', None, 'html', [str])
    app.add_config_value('html_sidebars', {}, 'html')
    app.add_config_value('html_additional_pages', {}, 'html')
    app.add_config_value('html_domain_indices', True, 'html', [list])
    # html_add_permalinks is deprecated; UNSET marks "not set by the user"
    app.add_config_value('html_add_permalinks', UNSET, 'html')
    app.add_config_value('html_permalinks', True, 'html')
    app.add_config_value('html_permalinks_icon', '¶', 'html')
    app.add_config_value('html_use_index', True, 'html')
    app.add_config_value('html_split_index', False, 'html')
    app.add_config_value('html_copy_source', True, 'html')
    app.add_config_value('html_show_sourcelink', True, 'html')
    app.add_config_value('html_sourcelink_suffix', '.txt', 'html')
    app.add_config_value('html_use_opensearch', '', 'html')
    app.add_config_value('html_file_suffix', None, 'html', [str])
    app.add_config_value('html_link_suffix', None, 'html', [str])
    app.add_config_value('html_show_copyright', True, 'html')
    app.add_config_value('html_show_search_summary', True, 'html')
    app.add_config_value('html_show_sphinx', True, 'html')
    app.add_config_value('html_context', {}, 'html')
    app.add_config_value('html_output_encoding', 'utf-8', 'html')
    app.add_config_value('html_compact_lists', True, 'html')
    app.add_config_value('html_secnumber_suffix', '. ', 'html')
    app.add_config_value('html_search_language', None, 'html', [str])
    app.add_config_value('html_search_options', {}, 'html')
    app.add_config_value('html_search_scorer', '', None)
    app.add_config_value('html_scaled_image_link', True, 'html')
    app.add_config_value('html_baseurl', '', 'html')
    app.add_config_value('html_codeblock_linenos_style', 'inline', 'html', ENUM('table', 'inline'))
    app.add_config_value('html_math_renderer', None, 'env')
    app.add_config_value('html4_writer', False, 'html')
    # events
    app.add_event('html-collect-pages')
    app.add_event('html-page-context')
    # event handlers (priority 800: run after normal config-inited handlers)
    app.connect('config-inited', convert_html_css_files, priority=800)
    app.connect('config-inited', convert_html_js_files, priority=800)
    app.connect('config-inited', migrate_html_add_permalinks, priority=800)
    app.connect('config-inited', validate_html_extra_path, priority=800)
    app.connect('config-inited', validate_html_static_path, priority=800)
    app.connect('config-inited', validate_html_logo, priority=800)
    app.connect('config-inited', validate_html_favicon, priority=800)
    app.connect('builder-inited', validate_math_renderer)
    app.connect('html-page-context', setup_css_tag_helper)
    app.connect('html-page-context', setup_js_tag_helper)
    app.connect('html-page-context', setup_resource_paths)
    # default math renderer and HTML-specific transforms
    app.setup_extension('sphinx.ext.mathjax')
    app.setup_extension('sphinx.builders.html.transforms')
    return {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
| true | true |
f713fb64d17d622dd93ea8409be4defebd59a2ca | 39,023 | py | Python | ServidorPython/python32_web/Lib/site-packages/numpy/lib/tests/test_recfunctions.py | mak213k/Servidor_automatizado_python | 4403ef8027a2f814220baacc95856cf5fbf01d21 | [
"MIT"
] | 65 | 2019-07-24T21:44:58.000Z | 2022-03-23T07:12:07.000Z | venv/lib/python3.7/site-packages/numpy/lib/tests/test_recfunctions.py | haideraltahan/CropMe | 75a111b9d3b2c50c6f2a9a36d21432053f02284d | [
"MIT"
] | 10 | 2019-12-04T23:51:44.000Z | 2022-02-10T09:23:15.000Z | venv/lib/python3.7/site-packages/numpy/lib/tests/test_recfunctions.py | haideraltahan/CropMe | 75a111b9d3b2c50c6f2a9a36d21432053f02284d | [
"MIT"
] | 45 | 2019-08-13T09:20:48.000Z | 2022-02-18T06:09:42.000Z | from __future__ import division, absolute_import, print_function
import pytest
import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
from numpy.testing import assert_, assert_raises
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
repack_fields, unstructured_to_structured, structured_to_unstructured,
apply_along_fields, require_fields, assign_fields_by_name)
# Shorthand aliases for the (semi-private) recfunctions helpers under test.
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
class TestRecFunctions(object):
    """Tests for miscellaneous helpers in numpy.lib.recfunctions."""
    def setup(self):
        # shared fixtures: plain, flexible-dtype and nested-dtype arrays
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array([('A', 1.), ('B', 2.)],
                     dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)
    def test_zip_descr(self):
        # Test zip_descr
        (w, x, y, z) = self.data
        # Std array
        test = zip_descr((x, x), flatten=True)
        assert_equal(test,
                     np.dtype([('', int), ('', int)]))
        test = zip_descr((x, x), flatten=False)
        assert_equal(test,
                     np.dtype([('', int), ('', int)]))
        # Std & flexible-dtype
        test = zip_descr((x, z), flatten=True)
        assert_equal(test,
                     np.dtype([('', int), ('A', '|S3'), ('B', float)]))
        test = zip_descr((x, z), flatten=False)
        assert_equal(test,
                     np.dtype([('', int),
                               ('', [('A', '|S3'), ('B', float)])]))
        # Standard & nested dtype
        test = zip_descr((x, w), flatten=True)
        assert_equal(test,
                     np.dtype([('', int),
                               ('a', int),
                               ('ba', float), ('bb', int)]))
        test = zip_descr((x, w), flatten=False)
        assert_equal(test,
                     np.dtype([('', int),
                               ('', [('a', int),
                                     ('b', [('ba', float), ('bb', int)])])]))
    def test_drop_fields(self):
        # Test drop_fields
        a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        # A basic field
        test = drop_fields(a, 'a')
        control = np.array([((2, 3.0),), ((5, 6.0),)],
                           dtype=[('b', [('ba', float), ('bb', int)])])
        assert_equal(test, control)
        # Another basic field (but nesting two fields)
        test = drop_fields(a, 'b')
        control = np.array([(1,), (4,)], dtype=[('a', int)])
        assert_equal(test, control)
        # A nested sub-field
        test = drop_fields(a, ['ba', ])
        control = np.array([(1, (3.0,)), (4, (6.0,))],
                           dtype=[('a', int), ('b', [('bb', int)])])
        assert_equal(test, control)
        # All the nested sub-field from a field: zap that field
        test = drop_fields(a, ['ba', 'bb'])
        control = np.array([(1,), (4,)], dtype=[('a', int)])
        assert_equal(test, control)
        # dropping every field yields None
        test = drop_fields(a, ['a', 'b'])
        assert_(test is None)
    def test_rename_fields(self):
        # Test rename fields
        a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
                     dtype=[('a', int),
                            ('b', [('ba', float), ('bb', (float, 2))])])
        test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
        newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
        control = a.view(newdtype)
        assert_equal(test.dtype, newdtype)
        assert_equal(test, control)
    def test_get_names(self):
        # Test get_names
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_names(ndtype)
        assert_equal(test, ('A', 'B'))
        ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
        test = get_names(ndtype)
        assert_equal(test, ('a', ('b', ('ba', 'bb'))))
    def test_get_names_flat(self):
        # Test get_names_flat
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_names_flat(ndtype)
        assert_equal(test, ('A', 'B'))
        ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
        test = get_names_flat(ndtype)
        assert_equal(test, ('a', 'b', 'ba', 'bb'))
    def test_get_fieldstructure(self):
        # Test get_fieldstructure
        # No nested fields
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_fieldstructure(ndtype)
        assert_equal(test, {'A': [], 'B': []})
        # One 1-nested field
        ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        test = get_fieldstructure(ndtype)
        assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})
        # One 2-nested fields
        ndtype = np.dtype([('A', int),
                           ('B', [('BA', int),
                                  ('BB', [('BBA', int), ('BBB', int)])])])
        test = get_fieldstructure(ndtype)
        control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
                   'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
        assert_equal(test, control)
    def test_find_duplicates(self):
        # Test find_duplicates
        a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
                      (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
                     mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
                           (0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
                     dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        test = find_duplicates(a, ignoremask=False, return_index=True)
        control = [0, 2]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='A', return_index=True)
        control = [0, 1, 2, 3, 5]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='B', return_index=True)
        control = [0, 1, 2, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='BA', return_index=True)
        control = [0, 1, 2, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='BB', return_index=True)
        control = [0, 1, 2, 3, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
    def test_find_duplicates_ignoremask(self):
        # Test the ignoremask option of find_duplicates
        ndtype = [('a', int)]
        a = ma.array([1, 1, 1, 2, 2, 3, 3],
                     mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
        test = find_duplicates(a, ignoremask=True, return_index=True)
        control = [0, 1, 3, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, ignoremask=False, return_index=True)
        control = [0, 1, 2, 3, 4, 6]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
    def test_repack_fields(self):
        dt = np.dtype('u1,f4,i8', align=True)
        a = np.zeros(2, dtype=dt)
        assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
        assert_equal(repack_fields(a).itemsize, 13)
        assert_equal(repack_fields(repack_fields(dt), align=True), dt)
        # make sure type is preserved
        dt = np.dtype((np.record, dt))
        assert_(repack_fields(dt).type is np.record)
    def test_structured_to_unstructured(self):
        a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
        out = structured_to_unstructured(a)
        assert_equal(out, np.zeros((4,5), dtype='f8'))
        b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
        assert_equal(out, np.array([ 3. ,  5.5,  9. , 11. ]))
        out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
        assert_equal(out, np.array([ 1. ,  4. ,  7. , 10. ]))
        c = np.arange(20).reshape((4,5))
        out = unstructured_to_structured(c, a.dtype)
        want = np.array([( 0, ( 1.,  2), [ 3.,  4.]),
                         ( 5, ( 6.,  7), [ 8.,  9.]),
                         (10, (11., 12), [13., 14.]),
                         (15, (16., 17), [18., 19.])],
                        dtype=[('a', 'i4'),
                               ('b', [('f0', 'f4'), ('f1', 'u2')]),
                               ('c', 'f4', (2,))])
        assert_equal(out, want)
        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        assert_equal(apply_along_fields(np.mean, d),
                     np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ]))
        assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
                     np.array([ 3. , 5.5, 9. , 11. ]))
        # check that for uniform field dtypes we get a view, not a copy:
        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)
        # including uniform fields with subarrays unpacked
        d = np.array([(1, [2, 3], [[ 4,  5], [ 6,  7]]),
                      (8, [9, 10], [[11, 12], [13, 14]])],
                     dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)
        # test that nested fields with identical names don't break anything
        point = np.dtype([('x', int), ('y', int)])
        triangle = np.dtype([('a', point), ('b', point), ('c', point)])
        arr = np.zeros(10, triangle)
        res = structured_to_unstructured(arr, dtype=int)
        assert_equal(res, np.zeros((10, 6), dtype=int))
    def test_field_assignment_by_name(self):
        a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
        newdt = [('b', 'f4'), ('c', 'u1')]
        assert_equal(require_fields(a, newdt), np.ones(2, newdt))
        b = np.array([(1,2), (3,4)], dtype=newdt)
        assign_fields_by_name(a, b, zero_unassigned=False)
        assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype))
        assign_fields_by_name(a, b)
        assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype))
        # test nested fields
        a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
        newdt = [('a', [('c', 'u1')])]
        assert_equal(require_fields(a, newdt), np.ones(2, newdt))
        b = np.array([((2,),), ((3,),)], dtype=newdt)
        assign_fields_by_name(a, b, zero_unassigned=False)
        assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype))
        assign_fields_by_name(a, b)
        assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype))
        # test unstructured code path for 0d arrays
        a, b = np.array(3), np.array(0)
        assign_fields_by_name(b, a)
        assert_equal(b[()], 3)
class TestRecursiveFillFields(object):
    """Tests for recursive_fill_fields."""

    def test_simple_flexible(self):
        # Plain structured source: copied rows fill `dst`, the rest stay zero.
        ndtype = [('A', int), ('B', float)]
        src = np.array([(1, 10.), (2, 20.)], dtype=ndtype)
        dst = np.zeros((3,), dtype=src.dtype)
        result = recursive_fill_fields(src, dst)
        expected = np.array([(1, 10.), (2, 20.), (0, 0.)], dtype=ndtype)
        assert_equal(result, expected)

    def test_masked_flexible(self):
        # Masked structured source: the mask is carried over as well.
        ndtype = [('A', int), ('B', float)]
        src = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
                       dtype=ndtype)
        dst = ma.zeros((3,), dtype=src.dtype)
        result = recursive_fill_fields(src, dst)
        expected = ma.array([(1, 10.), (2, 20.), (0, 0.)],
                            mask=[(0, 1), (1, 0), (0, 0)],
                            dtype=ndtype)
        assert_equal(result, expected)
class TestMergeArrays(object):
    # Test merge_arrays
    """Tests for merge_arrays: plain, flexible, nested and masked inputs."""
    def setup(self):
        # Fixtures: x/y plain 1d arrays (different lengths), z flexible,
        # w nested structured array.
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array(
            [(1, (2, 3.0)), (4, (5, 6.0))],
            dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)
    def test_solo(self):
        # Test merge_arrays on a single array.
        (_, x, _, z) = self.data
        test = merge_arrays(x)
        control = np.array([(1,), (2,)], dtype=[('f0', int)])
        assert_equal(test, control)
        test = merge_arrays((x,))
        assert_equal(test, control)
        test = merge_arrays(z, flatten=False)
        assert_equal(test, z)
        test = merge_arrays(z, flatten=True)
        assert_equal(test, z)
    def test_solo_w_flatten(self):
        # Test merge_arrays on a single array w & w/o flattening
        w = self.data[0]
        test = merge_arrays(w, flatten=False)
        assert_equal(test, w)
        test = merge_arrays(w, flatten=True)
        control = np.array([(1, 2, 3.0), (4, 5, 6.0)],
                           dtype=[('a', int), ('ba', float), ('bb', int)])
        assert_equal(test, control)
    def test_standard(self):
        # Test standard & standard
        # Test merge arrays
        (_, x, y, _) = self.data
        test = merge_arrays((x, y), usemask=False)
        # x is shorter than y: the missing trailing value is filled with -1.
        control = np.array([(1, 10), (2, 20), (-1, 30)],
                           dtype=[('f0', int), ('f1', int)])
        assert_equal(test, control)
        test = merge_arrays((x, y), usemask=True)
        control = ma.array([(1, 10), (2, 20), (-1, 30)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('f0', int), ('f1', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
    def test_flatten(self):
        # Test standard & flexible
        (_, x, _, z) = self.data
        test = merge_arrays((x, z), flatten=True)
        control = np.array([(1, 'A', 1.), (2, 'B', 2.)],
                           dtype=[('f0', int), ('A', '|S3'), ('B', float)])
        assert_equal(test, control)
        test = merge_arrays((x, z), flatten=False)
        control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
                           dtype=[('f0', int),
                                  ('f1', [('A', '|S3'), ('B', float)])])
        assert_equal(test, control)
    def test_flatten_wflexible(self):
        # Test flatten standard & nested
        (w, x, _, _) = self.data
        test = merge_arrays((x, w), flatten=True)
        control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
                           dtype=[('f0', int),
                                  ('a', int), ('ba', float), ('bb', int)])
        assert_equal(test, control)
        test = merge_arrays((x, w), flatten=False)
        controldtype = [('f0', int),
                        ('f1', [('a', int),
                                ('b', [('ba', float), ('bb', int)])])]
        control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],
                           dtype=controldtype)
        assert_equal(test, control)
    def test_wmasked_arrays(self):
        # Test merge_arrays masked arrays
        (_, x, _, _) = self.data
        mx = ma.array([1, 2, 3], mask=[1, 0, 0])
        test = merge_arrays((x, mx), usemask=True)
        control = ma.array([(1, 1), (2, 2), (-1, 3)],
                           mask=[(0, 1), (0, 0), (1, 0)],
                           dtype=[('f0', int), ('f1', int)])
        assert_equal(test, control)
        test = merge_arrays((x, mx), usemask=True, asrecarray=True)
        assert_equal(test, control)
        assert_(isinstance(test, MaskedRecords))
    def test_w_singlefield(self):
        # Test single field
        test = merge_arrays((np.array([1, 2]).view([('a', int)]),
                             np.array([10., 20., 30.])),)
        control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('a', int), ('f1', float)])
        assert_equal(test, control)
    def test_w_shorter_flex(self):
        # Test merge_arrays w/ a shorter flexndarray.
        z = self.data[-1]
        # Fixme, this test looks incomplete and broken
        #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
        #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
        #                   dtype=[('A', '|S3'), ('B', float), ('C', int)])
        #assert_equal(test, control)
        # Hack to avoid pyflakes warnings about unused variables
        merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
        np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
                 dtype=[('A', '|S3'), ('B', float), ('C', int)])
    def test_singlerecord(self):
        # Merging scalar records (0d items) also yields a single record.
        (_, x, y, z) = self.data
        test = merge_arrays((x[0], y[0], z[0]), usemask=False)
        control = np.array([(1, 10, ('A', 1))],
                           dtype=[('f0', int),
                                  ('f1', int),
                                  ('f2', [('A', '|S3'), ('B', float)])])
        assert_equal(test, control)
class TestAppendFields(object):
    # Test append_fields
    """Tests for append_fields on plain, flexible and nested base arrays."""
    def setup(self):
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)
    def test_append_single(self):
        # Test simple case
        (_, x, _, _) = self.data
        test = append_fields(x, 'A', data=[10, 20, 30])
        # The base array is shorter than the new column: its missing
        # trailing entry comes back masked.
        control = ma.array([(1, 10), (2, 20), (-1, 30)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('f0', int), ('A', int)],)
        assert_equal(test, control)
    def test_append_double(self):
        # Test simple case
        (_, x, _, _) = self.data
        test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
        control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
                           mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
                           dtype=[('f0', int), ('A', int), ('B', int)],)
        assert_equal(test, control)
    def test_append_on_flex(self):
        # Test append_fields on flexible type arrays
        z = self.data[-1]
        test = append_fields(z, 'C', data=[10, 20, 30])
        control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
                           mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('C', int)],)
        assert_equal(test, control)
    def test_append_on_nested(self):
        # Test append_fields on nested fields
        w = self.data[0]
        test = append_fields(w, 'C', data=[10, 20, 30])
        control = ma.array([(1, (2, 3.0), 10),
                            (4, (5, 6.0), 20),
                            (-1, (-1, -1.), 30)],
                           mask=[(
                               0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],
                           dtype=[('a', int),
                                  ('b', [('ba', float), ('bb', int)]),
                                  ('C', int)],)
        assert_equal(test, control)
class TestStackArrays(object):
    # Test stack_arrays
    """Tests for stack_arrays: identity, mixed field names, defaults,
    autoconversion, titled fields and subarray dtypes."""
    def setup(self):
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)
    def test_solo(self):
        # Test stack_arrays on single arrays
        (_, x, _, _) = self.data
        test = stack_arrays((x,))
        assert_equal(test, x)
        # A single input is returned unchanged (same object, not a copy).
        assert_(test is x)
        test = stack_arrays(x)
        assert_equal(test, x)
        assert_(test is x)
    def test_unnamed_fields(self):
        # Tests combinations of arrays w/o named fields
        (_, x, y, _) = self.data
        test = stack_arrays((x, x), usemask=False)
        control = np.array([1, 2, 1, 2])
        assert_equal(test, control)
        test = stack_arrays((x, y), usemask=False)
        control = np.array([1, 2, 10, 20, 30])
        assert_equal(test, control)
        test = stack_arrays((y, x), usemask=False)
        control = np.array([10, 20, 30, 1, 2])
        assert_equal(test, control)
    def test_unnamed_and_named_fields(self):
        # Test combination of arrays w/ & w/o named fields
        (_, x, _, z) = self.data
        test = stack_arrays((x, z))
        # Fields missing from one input are masked in the other's rows.
        control = ma.array([(1, -1, -1), (2, -1, -1),
                            (-1, 'A', 1), (-1, 'B', 2)],
                           mask=[(0, 1, 1), (0, 1, 1),
                                 (1, 0, 0), (1, 0, 0)],
                           dtype=[('f0', int), ('A', '|S3'), ('B', float)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        test = stack_arrays((z, x))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            (-1, -1, 1), (-1, -1, 2), ],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (1, 1, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('f2', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        test = stack_arrays((z, z, x))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            ('A', 1, -1), ('B', 2, -1),
                            (-1, -1, 1), (-1, -1, 2), ],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 1), (0, 0, 1),
                                 (1, 1, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('f2', int)])
        assert_equal(test, control)
    def test_matching_named_fields(self):
        # Test combination of arrays w/ matching field names
        (_, x, _, z) = self.data
        zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                      dtype=[('A', '|S3'), ('B', float), ('C', float)])
        test = stack_arrays((z, zz))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            (
                            'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                           dtype=[('A', '|S3'), ('B', float), ('C', float)],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 0), (0, 0, 0), (0, 0, 0)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        test = stack_arrays((z, zz, x))
        ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
        control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
                            ('a', 10., 100., -1), ('b', 20., 200., -1),
                            ('c', 30., 300., -1),
                            (-1, -1, -1, 1), (-1, -1, -1, 2)],
                           dtype=ndtype,
                           mask=[(0, 0, 1, 1), (0, 0, 1, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
                                 (1, 1, 1, 0), (1, 1, 1, 0)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
    def test_defaults(self):
        # Test defaults: no exception raised if keys of defaults are not fields.
        (_, _, _, z) = self.data
        zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                      dtype=[('A', '|S3'), ('B', float), ('C', float)])
        # 'D' is not a field of either input; it must be silently ignored.
        defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}
        test = stack_arrays((z, zz), defaults=defaults)
        control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
                            (
                            'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                           dtype=[('A', '|S3'), ('B', float), ('C', float)],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 0), (0, 0, 0), (0, 0, 0)])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)
    def test_autoconversion(self):
        # Tests autoconversion
        adtype = [('A', int), ('B', bool), ('C', float)]
        a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
        bdtype = [('A', int), ('B', float), ('C', float)]
        b = ma.array([(4, 5, 6)], dtype=bdtype)
        control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
                           dtype=bdtype)
        test = stack_arrays((a, b), autoconvert=True)
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        # Without autoconversion the bool/float field mismatch must raise.
        with assert_raises(TypeError):
            stack_arrays((a, b), autoconvert=False)
    def test_checktitles(self):
        # Test using titles in the field names
        adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
        a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
        bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
        b = ma.array([(4, 5, 6)], dtype=bdtype)
        test = stack_arrays((a, b))
        control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
                           dtype=bdtype)
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
    def test_subdtype(self):
        # Fields with subarray dtypes (shape (1,)) survive stacking.
        z = np.array([
            ('A', 1), ('B', 2)
        ], dtype=[('A', '|S3'), ('B', float, (1,))])
        zz = np.array([
            ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
        ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
        res = stack_arrays((z, zz))
        expected = ma.array(
            data=[
                (b'A', [1.0], 0),
                (b'B', [2.0], 0),
                (b'a', [10.0], 100.0),
                (b'b', [20.0], 200.0),
                (b'c', [30.0], 300.0)],
            mask=[
                (False, [False], True),
                (False, [False], True),
                (False, [False], False),
                (False, [False], False),
                (False, [False], False)
            ],
            dtype=zz.dtype
        )
        assert_equal(res.dtype, expected.dtype)
        assert_equal(res, expected)
        assert_equal(res.mask, expected.mask)
class TestJoinBy(object):
    """Tests for join_by: inner/outer/leftouter joins, multi-key joins,
    dtype-promotion corner cases, subarray keys and padded dtypes."""
    def setup(self):
        # Two arrays overlapping on keys 5..9 of field 'a'.
        self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
                                   np.arange(100, 110))),
                          dtype=[('a', int), ('b', int), ('c', int)])
        self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
                                   np.arange(100, 110))),
                          dtype=[('a', int), ('b', int), ('d', int)])
    def test_inner_join(self):
        # Basic test of join_by
        a, b = self.a, self.b
        test = join_by('a', a, b, jointype='inner')
        # The shared non-key field 'b' is suffixed 'b1'/'b2'.
        control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
                            (7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
                            (9, 59, 69, 109, 104)],
                           dtype=[('a', int), ('b1', int), ('b2', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)
    def test_join(self):
        a, b = self.a, self.b
        # Fixme, this test is broken
        #test = join_by(('a', 'b'), a, b)
        #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
        #                    (7, 57, 107, 102), (8, 58, 108, 103),
        #                    (9, 59, 109, 104)],
        #                   dtype=[('a', int), ('b', int),
        #                          ('c', int), ('d', int)])
        #assert_equal(test, control)
        # Hack to avoid pyflakes unused variable warnings
        join_by(('a', 'b'), a, b)
        np.array([(5, 55, 105, 100), (6, 56, 106, 101),
                  (7, 57, 107, 102), (8, 58, 108, 103),
                  (9, 59, 109, 104)],
                 dtype=[('a', int), ('b', int),
                        ('c', int), ('d', int)])
    def test_join_subdtype(self):
        # tests the bug in https://stackoverflow.com/q/44769632/102441
        from numpy.lib import recfunctions as rfn
        foo = np.array([(1,)],
                       dtype=[('key', int)])
        bar = np.array([(1, np.array([1,2,3]))],
                       dtype=[('key', int), ('value', 'uint16', 3)])
        res = join_by('key', foo, bar)
        assert_equal(res, bar.view(ma.MaskedArray))
    def test_outer_join(self):
        # Outer join keeps unmatched keys from both sides, masking the
        # fields the other side does not provide.
        a, b = self.a, self.b
        test = join_by(('a', 'b'), a, b, 'outer')
        control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
                            (2, 52, 102, -1), (3, 53, 103, -1),
                            (4, 54, 104, -1), (5, 55, 105, -1),
                            (5, 65, -1, 100), (6, 56, 106, -1),
                            (6, 66, -1, 101), (7, 57, 107, -1),
                            (7, 67, -1, 102), (8, 58, 108, -1),
                            (8, 68, -1, 103), (9, 59, 109, -1),
                            (9, 69, -1, 104), (10, 70, -1, 105),
                            (11, 71, -1, 106), (12, 72, -1, 107),
                            (13, 73, -1, 108), (14, 74, -1, 109)],
                           mask=[(0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 1, 0),
                                 (0, 0, 1, 0), (0, 0, 1, 0),
                                 (0, 0, 1, 0), (0, 0, 1, 0)],
                           dtype=[('a', int), ('b', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)
    def test_leftouter_join(self):
        # Left-outer join keeps every row of `a` only.
        a, b = self.a, self.b
        test = join_by(('a', 'b'), a, b, 'leftouter')
        control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
                            (2, 52, 102, -1), (3, 53, 103, -1),
                            (4, 54, 104, -1), (5, 55, 105, -1),
                            (6, 56, 106, -1), (7, 57, 107, -1),
                            (8, 58, 108, -1), (9, 59, 109, -1)],
                           mask=[(0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1)],
                           dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
        assert_equal(test, control)
    def test_different_field_order(self):
        # gh-8940
        a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
        b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
        # this should not give a FutureWarning:
        j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
        assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
    def test_duplicate_keys(self):
        # Repeating a key name must be rejected.
        a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
        b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
        assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
    @pytest.mark.xfail(reason="See comment at gh-9343")
    def test_same_name_different_dtypes_key(self):
        # Key fields with different string widths should promote.
        a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
        b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
        expected_dtype = np.dtype([
            ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
        a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
        b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
        res = join_by('key', a, b)
        assert_equal(res.dtype, expected_dtype)
    def test_same_name_different_dtypes(self):
        # gh-9338
        a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
        b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
        expected_dtype = np.dtype([
            ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])
        a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
        b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
        res = join_by('key', a, b)
        assert_equal(res.dtype, expected_dtype)
    def test_subarray_key(self):
        # A key field may itself be a subarray; rows match on the whole
        # subarray value ([1, 1, 1] here).
        a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
        a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)
        b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
        b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)
        expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
        expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)
        res = join_by('pos', a, b)
        assert_equal(res.dtype, expected_dtype)
        assert_equal(res, expected)
    def test_padded_dtype(self):
        dt = np.dtype('i1,f4', align=True)
        dt.names = ('k', 'v')
        # NOTE(review): assert_ takes (value, msg); this only checks that
        # len(dt.descr) is truthy, not that it equals 3 — confirm intent.
        assert_(len(dt.descr), 3)  # padding field is inserted
        a = np.array([(1, 3), (3, 2)], dt)
        b = np.array([(1, 1), (2, 2)], dt)
        res = join_by('k', a, b)
        # no padding fields remain
        expected_dtype = np.dtype([
            ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
        ])
        assert_equal(res.dtype, expected_dtype)
class TestJoinBy2(object):
    """Tests for join_by postfix handling (r1postfix/r2postfix)."""
    @classmethod
    def setup(cls):
        # Both arrays share the full key range 0..9 of field 'a'.
        cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
                                  np.arange(100, 110))),
                         dtype=[('a', int), ('b', int), ('c', int)])
        cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
                                  np.arange(100, 110))),
                         dtype=[('a', int), ('b', int), ('d', int)])
    def test_no_r1postfix(self):
        # Basic test of join_by no_r1postfix
        a, b = self.a, self.b
        test = join_by(
            'a', a, b, r1postfix='', r2postfix='2', jointype='inner')
        # Only the right-hand duplicate 'b' gets a suffix.
        control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                            (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                            (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                            (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                            (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                           dtype=[('a', int), ('b', int), ('b2', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)
    def test_no_postfix(self):
        # Both postfixes empty would collide on 'b': must raise.
        assert_raises(ValueError, join_by, 'a', self.a, self.b,
                      r1postfix='', r2postfix='')
    def test_no_r2postfix(self):
        # Basic test of join_by no_r2postfix
        a, b = self.a, self.b
        test = join_by(
            'a', a, b, r1postfix='1', r2postfix='', jointype='inner')
        control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                            (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                            (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                            (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                            (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                           dtype=[('a', int), ('b1', int), ('b', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)
    def test_two_keys_two_vars(self):
        # Join on two keys with two shared non-key fields ('b' and 'c').
        a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(50, 60), np.arange(10, 20))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
        b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(65, 75), np.arange(0, 10))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
        control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
                            (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
                            (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
                            (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
                            (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
                           dtype=[('k', int), ('a', int), ('b1', int),
                                  ('b2', int), ('c1', int), ('c2', int)])
        test = join_by(
            ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
        assert_equal(test.dtype, control.dtype)
        assert_equal(test, control)
class TestAppendFieldsObj(object):
    """Test append_fields with arrays containing objects.

    Regression test for https://github.com/numpy/numpy/issues/2346.
    """

    def setup(self):
        from datetime import date
        self.data = dict(obj=date(2000, 1, 1))

    def test_append_to_objects(self):
        "Test append_fields when the base array contains objects"
        obj = self.data['obj']
        base = np.array([(obj, 1.), (obj, 2.)],
                        dtype=[('A', object), ('B', float)])
        extra = np.array([10, 20], dtype=int)
        result = append_fields(base, 'C', data=extra, usemask=False)
        expected = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
                            dtype=[('A', object), ('B', float), ('C', int)])
        assert_equal(result, expected)
| 42.27844 | 85 | 0.429413 | from __future__ import division, absolute_import, print_function
import pytest
import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
from numpy.testing import assert_, assert_raises
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
repack_fields, unstructured_to_structured, structured_to_unstructured,
apply_along_fields, require_fields, assign_fields_by_name)
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
class TestRecFunctions(object):
    """Tests for the basic recfunctions helpers: zip_descr, drop_fields,
    rename_fields, get_names(_flat), get_fieldstructure, find_duplicates,
    repack_fields and (un)structured conversion."""
    def setup(self):
        # Fixtures: x/y plain 1d, z flexible, w nested structured array.
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array([('A', 1.), ('B', 2.)],
                     dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)
    def test_zip_descr(self):
        # zip_descr combines dtype descriptions, flattened or nested.
        (w, x, y, z) = self.data
        test = zip_descr((x, x), flatten=True)
        assert_equal(test,
                     np.dtype([('', int), ('', int)]))
        test = zip_descr((x, x), flatten=False)
        assert_equal(test,
                     np.dtype([('', int), ('', int)]))
        test = zip_descr((x, z), flatten=True)
        assert_equal(test,
                     np.dtype([('', int), ('A', '|S3'), ('B', float)]))
        test = zip_descr((x, z), flatten=False)
        assert_equal(test,
                     np.dtype([('', int),
                               ('', [('A', '|S3'), ('B', float)])]))
        test = zip_descr((x, w), flatten=True)
        assert_equal(test,
                     np.dtype([('', int),
                               ('a', int),
                               ('ba', float), ('bb', int)]))
        test = zip_descr((x, w), flatten=False)
        assert_equal(test,
                     np.dtype([('', int),
                               ('', [('a', int),
                                     ('b', [('ba', float), ('bb', int)])])]))
    def test_drop_fields(self):
        a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        # Dropping a top-level field.
        test = drop_fields(a, 'a')
        control = np.array([((2, 3.0),), ((5, 6.0),)],
                           dtype=[('b', [('ba', float), ('bb', int)])])
        assert_equal(test, control)
        test = drop_fields(a, 'b')
        control = np.array([(1,), (4,)], dtype=[('a', int)])
        assert_equal(test, control)
        # Dropping a nested field keeps its siblings.
        test = drop_fields(a, ['ba', ])
        control = np.array([(1, (3.0,)), (4, (6.0,))],
                           dtype=[('a', int), ('b', [('bb', int)])])
        assert_equal(test, control)
        test = drop_fields(a, ['ba', 'bb'])
        control = np.array([(1,), (4,)], dtype=[('a', int)])
        assert_equal(test, control)
        # Dropping everything yields None.
        test = drop_fields(a, ['a', 'b'])
        assert_(test is None)
    def test_rename_fields(self):
        # rename_fields works on nested fields and returns a view.
        a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
                     dtype=[('a', int),
                            ('b', [('ba', float), ('bb', (float, 2))])])
        test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
        newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
        control = a.view(newdtype)
        assert_equal(test.dtype, newdtype)
        assert_equal(test, control)
    def test_get_names(self):
        # Nested field names come back as nested tuples.
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_names(ndtype)
        assert_equal(test, ('A', 'B'))
        ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
        test = get_names(ndtype)
        assert_equal(test, ('a', ('b', ('ba', 'bb'))))
    def test_get_names_flat(self):
        # Flat variant: nested names appear in one flat tuple.
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_names_flat(ndtype)
        assert_equal(test, ('A', 'B'))
        ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
        test = get_names_flat(ndtype)
        assert_equal(test, ('a', 'b', 'ba', 'bb'))
    def test_get_fieldstructure(self):
        # Maps each field name to the list of its parent fields.
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_fieldstructure(ndtype)
        assert_equal(test, {'A': [], 'B': []})
        ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        test = get_fieldstructure(ndtype)
        assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})
        ndtype = np.dtype([('A', int),
                           ('B', [('BA', int),
                                  ('BB', [('BBA', int), ('BBB', int)])])])
        test = get_fieldstructure(ndtype)
        control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
                   'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
        assert_equal(test, control)
    def test_find_duplicates(self):
        # Duplicates over the whole record or restricted to a given key.
        a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
                      (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
                     mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
                           (0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
                     dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        test = find_duplicates(a, ignoremask=False, return_index=True)
        control = [0, 2]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='A', return_index=True)
        control = [0, 1, 2, 3, 5]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='B', return_index=True)
        control = [0, 1, 2, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='BA', return_index=True)
        control = [0, 1, 2, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='BB', return_index=True)
        control = [0, 1, 2, 3, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
    def test_find_duplicates_ignoremask(self):
        # With ignoremask=True masked entries do not count as duplicates.
        ndtype = [('a', int)]
        a = ma.array([1, 1, 1, 2, 2, 3, 3],
                     mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
        test = find_duplicates(a, ignoremask=True, return_index=True)
        control = [0, 1, 3, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, ignoremask=False, return_index=True)
        control = [0, 1, 2, 3, 4, 6]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
    def test_repack_fields(self):
        # repack_fields removes dtype padding; round-trips with align=True.
        dt = np.dtype('u1,f4,i8', align=True)
        a = np.zeros(2, dtype=dt)
        assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
        assert_equal(repack_fields(a).itemsize, 13)
        assert_equal(repack_fields(repack_fields(dt), align=True), dt)
        # make sure type is preserved
        dt = np.dtype((np.record, dt))
        assert_(repack_fields(dt).type is np.record)
    def test_structured_to_unstructured(self):
        a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
        out = structured_to_unstructured(a)
        assert_equal(out, np.zeros((4,5), dtype='f8'))
        b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
        assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ]))
        out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
        assert_equal(out, np.array([ 1. , 4. , 7. , 10. ]))
        c = np.arange(20).reshape((4,5))
        out = unstructured_to_structured(c, a.dtype)
        want = np.array([( 0, ( 1., 2), [ 3., 4.]),
                         ( 5, ( 6., 7), [ 8., 9.]),
                         (10, (11., 12), [13., 14.]),
                         (15, (16., 17), [18., 19.])],
                        dtype=[('a', 'i4'),
                               ('b', [('f0', 'f4'), ('f1', 'u2')]),
                               ('c', 'f4', (2,))])
        assert_equal(out, want)
        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
        assert_equal(apply_along_fields(np.mean, d),
                     np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ]))
        assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
                     np.array([ 3. , 5.5, 9. , 11. ]))
        # check that for uniform field dtypes we get a view, not a copy:
        d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                     dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)
        # including uniform fields with subarrays unpacked
        d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
                      (8, [9, 10], [[11, 12], [13, 14]])],
                     dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))])
        dd = structured_to_unstructured(d)
        ddd = unstructured_to_structured(dd, d.dtype)
        assert_(dd.base is d)
        assert_(ddd.base is d)
        # test that nested fields with identical names don't break anything
        point = np.dtype([('x', int), ('y', int)])
        triangle = np.dtype([('a', point), ('b', point), ('c', point)])
        arr = np.zeros(10, triangle)
        res = structured_to_unstructured(arr, dtype=int)
        assert_equal(res, np.zeros((10, 6), dtype=int))
    def test_field_assignment_by_name(self):
        # Copy overlapping fields by name; zero the rest by default.
        a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
        newdt = [('b', 'f4'), ('c', 'u1')]
        assert_equal(require_fields(a, newdt), np.ones(2, newdt))
        b = np.array([(1,2), (3,4)], dtype=newdt)
        assign_fields_by_name(a, b, zero_unassigned=False)
        assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype))
        assign_fields_by_name(a, b)
        assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype))
        # test nested fields
        a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
        newdt = [('a', [('c', 'u1')])]
        assert_equal(require_fields(a, newdt), np.ones(2, newdt))
        b = np.array([((2,),), ((3,),)], dtype=newdt)
        assign_fields_by_name(a, b, zero_unassigned=False)
        assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype))
        assign_fields_by_name(a, b)
        assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype))
        # test unstructured code path for 0d arrays
        a, b = np.array(3), np.array(0)
        assign_fields_by_name(b, a)
        assert_equal(b[()], 3)
class TestRecursiveFillFields(object):
    """Tests for recursive_fill_fields."""

    def test_simple_flexible(self):
        # Plain structured source: copied rows fill `dst`, the rest stay zero.
        ndtype = [('A', int), ('B', float)]
        src = np.array([(1, 10.), (2, 20.)], dtype=ndtype)
        dst = np.zeros((3,), dtype=src.dtype)
        result = recursive_fill_fields(src, dst)
        expected = np.array([(1, 10.), (2, 20.), (0, 0.)], dtype=ndtype)
        assert_equal(result, expected)

    def test_masked_flexible(self):
        # Masked structured source: the mask is carried over as well.
        ndtype = [('A', int), ('B', float)]
        src = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
                       dtype=ndtype)
        dst = ma.zeros((3,), dtype=src.dtype)
        result = recursive_fill_fields(src, dst)
        expected = ma.array([(1, 10.), (2, 20.), (0, 0.)],
                            mask=[(0, 1), (1, 0), (0, 0)],
                            dtype=ndtype)
        assert_equal(result, expected)
class TestMergeArrays(object):
# Test merge_arrays
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array(
[(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
def test_solo(self):
# Test merge_arrays on a single array.
(_, x, _, z) = self.data
test = merge_arrays(x)
control = np.array([(1,), (2,)], dtype=[('f0', int)])
assert_equal(test, control)
test = merge_arrays((x,))
assert_equal(test, control)
test = merge_arrays(z, flatten=False)
assert_equal(test, z)
test = merge_arrays(z, flatten=True)
assert_equal(test, z)
def test_solo_w_flatten(self):
# Test merge_arrays on a single array w & w/o flattening
w = self.data[0]
test = merge_arrays(w, flatten=False)
assert_equal(test, w)
test = merge_arrays(w, flatten=True)
control = np.array([(1, 2, 3.0), (4, 5, 6.0)],
dtype=[('a', int), ('ba', float), ('bb', int)])
assert_equal(test, control)
def test_standard(self):
# Test standard & standard
# Test merge arrays
(_, x, y, _) = self.data
test = merge_arrays((x, y), usemask=False)
control = np.array([(1, 10), (2, 20), (-1, 30)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
test = merge_arrays((x, y), usemask=True)
control = ma.array([(1, 10), (2, 20), (-1, 30)],
mask=[(0, 0), (0, 0), (1, 0)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_flatten(self):
# Test standard & flexible
(_, x, _, z) = self.data
test = merge_arrays((x, z), flatten=True)
control = np.array([(1, 'A', 1.), (2, 'B', 2.)],
dtype=[('f0', int), ('A', '|S3'), ('B', float)])
assert_equal(test, control)
test = merge_arrays((x, z), flatten=False)
control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
dtype=[('f0', int),
('f1', [('A', '|S3'), ('B', float)])])
assert_equal(test, control)
def test_flatten_wflexible(self):
# Test flatten standard & nested
(w, x, _, _) = self.data
test = merge_arrays((x, w), flatten=True)
control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
dtype=[('f0', int),
('a', int), ('ba', float), ('bb', int)])
assert_equal(test, control)
test = merge_arrays((x, w), flatten=False)
controldtype = [('f0', int),
('f1', [('a', int),
('b', [('ba', float), ('bb', int)])])]
control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],
dtype=controldtype)
assert_equal(test, control)
def test_wmasked_arrays(self):
# Test merge_arrays masked arrays
(_, x, _, _) = self.data
mx = ma.array([1, 2, 3], mask=[1, 0, 0])
test = merge_arrays((x, mx), usemask=True)
control = ma.array([(1, 1), (2, 2), (-1, 3)],
mask=[(0, 1), (0, 0), (1, 0)],
dtype=[('f0', int), ('f1', int)])
assert_equal(test, control)
test = merge_arrays((x, mx), usemask=True, asrecarray=True)
assert_equal(test, control)
assert_(isinstance(test, MaskedRecords))
    def test_w_singlefield(self):
        """merge_arrays when one input is a single-field structured array.

        The named field keeps its name ('a'); the plain float array gets the
        default name 'f1', and the shorter input is mask-padded.
        """
        test = merge_arrays((np.array([1, 2]).view([('a', int)]),
                             np.array([10., 20., 30.])),)
        control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('a', int), ('f1', float)])
        assert_equal(test, control)
    def test_w_shorter_flex(self):
        """merge_arrays with a shorter flexible ndarray.

        NOTE(review): the real assertions below are commented out upstream
        because the test is known to be incomplete/broken; the calls are kept
        only so the code path is exercised without pyflakes warnings.
        """
        z = self.data[-1]

        # Fixme, this test looks incomplete and broken
        #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
        #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
        #                   dtype=[('A', '|S3'), ('B', float), ('C', int)])
        #assert_equal(test, control)

        # Hack to avoid pyflakes warnings about unused variables
        merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
        np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
                 dtype=[('A', '|S3'), ('B', float), ('C', int)])
    def test_singlerecord(self):
        """merge_arrays of three single records (0-d inputs) yields one row."""
        (_, x, y, z) = self.data
        test = merge_arrays((x[0], y[0], z[0]), usemask=False)
        control = np.array([(1, 10, ('A', 1))],
                           dtype=[('f0', int),
                                  ('f1', int),
                                  ('f2', [('A', '|S3'), ('B', float)])])
        assert_equal(test, control)
class TestAppendFields(object):
    """Exercise ``append_fields`` on plain, flexible and nested dtypes."""

    def setup(self):
        """Build shared fixtures: plain, longer, flexible and nested arrays."""
        plain = np.array([1, 2, ])
        longer = np.array([10, 20, 30])
        flexible = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        nested = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                          dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        # Same packing order as the sibling test classes: (w, x, y, z).
        self.data = (nested, plain, longer, flexible)

    def test_append_single(self):
        """Appending one longer field mask-pads the shorter base array."""
        _, base, _, _ = self.data
        result = append_fields(base, 'A', data=[10, 20, 30])
        expected = ma.array([(1, 10), (2, 20), (-1, 30)],
                            mask=[(0, 0), (0, 0), (1, 0)],
                            dtype=[('f0', int), ('A', int)],)
        assert_equal(result, expected)

    def test_append_double(self):
        """Appending two fields of different lengths pads each independently."""
        _, base, _, _ = self.data
        result = append_fields(base, ('A', 'B'),
                               data=[[10, 20, 30], [100, 200]])
        expected = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
                            mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
                            dtype=[('f0', int), ('A', int), ('B', int)],)
        assert_equal(result, expected)

    def test_append_on_flex(self):
        """append_fields works on flexible-dtype (string, float) bases."""
        flexible = self.data[-1]
        result = append_fields(flexible, 'C', data=[10, 20, 30])
        expected = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
                            mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
                            dtype=[('A', '|S3'), ('B', float), ('C', int)],)
        assert_equal(result, expected)

    def test_append_on_nested(self):
        """append_fields works on nested-dtype bases, masking padded rows."""
        nested = self.data[0]
        result = append_fields(nested, 'C', data=[10, 20, 30])
        expected = ma.array([(1, (2, 3.0), 10),
                             (4, (5, 6.0), 20),
                             (-1, (-1, -1.), 30)],
                            mask=[(0, (0, 0), 0),
                                  (0, (0, 0), 0),
                                  (1, (1, 1), 0)],
                            dtype=[('a', int),
                                   ('b', [('ba', float), ('bb', int)]),
                                   ('C', int)],)
        assert_equal(result, expected)
class TestStackArrays(object):
    """Exercise ``stack_arrays``: vertical concatenation of (structured)
    arrays, combining dtypes and masking values missing from either input."""

    def setup(self):
        """Build shared fixtures: plain (x), longer (y), flexible (z) and
        nested-dtype (w) arrays, packed as (w, x, y, z)."""
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)

    def test_solo(self):
        # Test stack_arrays on single arrays: the input must be returned
        # unchanged and *not* copied (identity is asserted).
        (_, x, _, _) = self.data
        test = stack_arrays((x,))
        assert_equal(test, x)
        assert_(test is x)
        test = stack_arrays(x)
        assert_equal(test, x)
        assert_(test is x)

    def test_unnamed_fields(self):
        # Tests combinations of arrays w/o named fields: plain concatenation.
        (_, x, y, _) = self.data
        test = stack_arrays((x, x), usemask=False)
        control = np.array([1, 2, 1, 2])
        assert_equal(test, control)
        test = stack_arrays((x, y), usemask=False)
        control = np.array([1, 2, 10, 20, 30])
        assert_equal(test, control)
        test = stack_arrays((y, x), usemask=False)
        control = np.array([10, 20, 30, 1, 2])
        assert_equal(test, control)

    def test_unnamed_and_named_fields(self):
        # Test combination of arrays w/ & w/o named fields: fields missing
        # from either input are masked in the result.
        (_, x, _, z) = self.data
        test = stack_arrays((x, z))
        control = ma.array([(1, -1, -1), (2, -1, -1),
                            (-1, 'A', 1), (-1, 'B', 2)],
                           mask=[(0, 1, 1), (0, 1, 1),
                                 (1, 0, 0), (1, 0, 0)],
                           dtype=[('f0', int), ('A', '|S3'), ('B', float)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        test = stack_arrays((z, x))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            (-1, -1, 1), (-1, -1, 2), ],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (1, 1, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('f2', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        test = stack_arrays((z, z, x))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            ('A', 1, -1), ('B', 2, -1),
                            (-1, -1, 1), (-1, -1, 2), ],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 1), (0, 0, 1),
                                 (1, 1, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('f2', int)])
        assert_equal(test, control)

    def test_matching_named_fields(self):
        # Test combination of arrays w/ matching field names: shared fields
        # ('A', 'B') are merged into one column each.
        (_, x, _, z) = self.data
        zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                      dtype=[('A', '|S3'), ('B', float), ('C', float)])
        test = stack_arrays((z, zz))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            ('a', 10., 100.), ('b', 20., 200.),
                            ('c', 30., 300.)],
                           dtype=[('A', '|S3'), ('B', float), ('C', float)],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 0), (0, 0, 0), (0, 0, 0)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        test = stack_arrays((z, zz, x))
        ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
        control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
                            ('a', 10., 100., -1), ('b', 20., 200., -1),
                            ('c', 30., 300., -1),
                            (-1, -1, -1, 1), (-1, -1, -1, 2)],
                           dtype=ndtype,
                           mask=[(0, 0, 1, 1), (0, 0, 1, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
                                 (1, 1, 1, 0), (1, 1, 1, 0)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)

    def test_defaults(self):
        # Test defaults: no exception raised if keys of defaults are not
        # fields; matching defaults ('C' -> -9999.) fill missing values.
        (_, _, _, z) = self.data
        zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                      dtype=[('A', '|S3'), ('B', float), ('C', float)])
        defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}
        test = stack_arrays((z, zz), defaults=defaults)
        control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
                            ('a', 10., 100.), ('b', 20., 200.),
                            ('c', 30., 300.)],
                           dtype=[('A', '|S3'), ('B', float), ('C', float)],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 0), (0, 0, 0), (0, 0, 0)])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)

    def test_autoconversion(self):
        # Tests autoconversion: bool column widens to float when
        # autoconvert=True; otherwise mismatched dtypes raise TypeError.
        adtype = [('A', int), ('B', bool), ('C', float)]
        a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
        bdtype = [('A', int), ('B', float), ('C', float)]
        b = ma.array([(4, 5, 6)], dtype=bdtype)
        control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
                           dtype=bdtype)
        test = stack_arrays((a, b), autoconvert=True)
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        with assert_raises(TypeError):
            stack_arrays((a, b), autoconvert=False)

    def test_checktitles(self):
        # Test using titles in the field names ((title, name) tuples).
        adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
        a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
        bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
        b = ma.array([(4, 5, 6)], dtype=bdtype)
        test = stack_arrays((a, b))
        control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
                           dtype=bdtype)
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)

    def test_subdtype(self):
        # Fields with a sub-array dtype (('B', float, (1,))) must stack
        # correctly, with the missing 'C' column masked in the first input.
        z = np.array([
            ('A', 1), ('B', 2)
        ], dtype=[('A', '|S3'), ('B', float, (1,))])
        zz = np.array([
            ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
        ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])

        res = stack_arrays((z, zz))
        expected = ma.array(
            data=[
                (b'A', [1.0], 0),
                (b'B', [2.0], 0),
                (b'a', [10.0], 100.0),
                (b'b', [20.0], 200.0),
                (b'c', [30.0], 300.0)],
            mask=[
                (False, [False], True),
                (False, [False], True),
                (False, [False], False),
                (False, [False], False),
                (False, [False], False)
            ],
            dtype=zz.dtype
        )
        assert_equal(res.dtype, expected.dtype)
        assert_equal(res, expected)
        assert_equal(res.mask, expected.mask)
class TestJoinBy(object):
def setup(self):
self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('d', int)])
def test_inner_join(self):
# Basic test of join_by
a, b = self.a, self.b
test = join_by('a', a, b, jointype='inner')
control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
(7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
(9, 59, 69, 109, 104)],
dtype=[('a', int), ('b1', int), ('b2', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_join(self):
a, b = self.a, self.b
# Fixme, this test is broken
#test = join_by(('a', 'b'), a, b)
#control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
# (7, 57, 107, 102), (8, 58, 108, 103),
# (9, 59, 109, 104)],
# dtype=[('a', int), ('b', int),
# ('c', int), ('d', int)])
#assert_equal(test, control)
# Hack to avoid pyflakes unused variable warnings
join_by(('a', 'b'), a, b)
np.array([(5, 55, 105, 100), (6, 56, 106, 101),
(7, 57, 107, 102), (8, 58, 108, 103),
(9, 59, 109, 104)],
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
def test_join_subdtype(self):
# tests the bug in https://stackoverflow.com/q/44769632/102441
from numpy.lib import recfunctions as rfn
foo = np.array([(1,)],
dtype=[('key', int)])
bar = np.array([(1, np.array([1,2,3]))],
dtype=[('key', int), ('value', 'uint16', 3)])
res = join_by('key', foo, bar)
assert_equal(res, bar.view(ma.MaskedArray))
def test_outer_join(self):
a, b = self.a, self.b
test = join_by(('a', 'b'), a, b, 'outer')
control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
(2, 52, 102, -1), (3, 53, 103, -1),
(4, 54, 104, -1), (5, 55, 105, -1),
(5, 65, -1, 100), (6, 56, 106, -1),
(6, 66, -1, 101), (7, 57, 107, -1),
(7, 67, -1, 102), (8, 58, 108, -1),
(8, 68, -1, 103), (9, 59, 109, -1),
(9, 69, -1, 104), (10, 70, -1, 105),
(11, 71, -1, 106), (12, 72, -1, 107),
(13, 73, -1, 108), (14, 74, -1, 109)],
mask=[(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 0, 1),
(0, 0, 1, 0), (0, 0, 1, 0),
(0, 0, 1, 0), (0, 0, 1, 0),
(0, 0, 1, 0), (0, 0, 1, 0)],
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
assert_equal(test, control)
def test_leftouter_join(self):
a, b = self.a, self.b
test = join_by(('a', 'b'), a, b, 'leftouter')
control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
(2, 52, 102, -1), (3, 53, 103, -1),
(4, 54, 104, -1), (5, 55, 105, -1),
(6, 56, 106, -1), (7, 57, 107, -1),
(8, 58, 108, -1), (9, 59, 109, -1)],
mask=[(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1),
(0, 0, 0, 1), (0, 0, 0, 1)],
dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
assert_equal(test, control)
def test_different_field_order(self):
# gh-8940
a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
# this should not give a FutureWarning:
j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
def test_duplicate_keys(self):
a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
@pytest.mark.xfail(reason="See comment at gh-9343")
def test_same_name_different_dtypes_key(self):
a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
expected_dtype = np.dtype([
('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
res = join_by('key', a, b)
assert_equal(res.dtype, expected_dtype)
def test_same_name_different_dtypes(self):
# gh-9338
a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
expected_dtype = np.dtype([
('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])
a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
res = join_by('key', a, b)
assert_equal(res.dtype, expected_dtype)
def test_subarray_key(self):
a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)
b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)
expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)
res = join_by('pos', a, b)
assert_equal(res.dtype, expected_dtype)
assert_equal(res, expected)
def test_padded_dtype(self):
dt = np.dtype('i1,f4', align=True)
dt.names = ('k', 'v')
assert_(len(dt.descr), 3) # padding field is inserted
a = np.array([(1, 3), (3, 2)], dt)
b = np.array([(1, 1), (2, 2)], dt)
res = join_by('k', a, b)
# no padding fields remain
expected_dtype = np.dtype([
('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
])
assert_equal(res.dtype, expected_dtype)
class TestJoinBy2(object):
    """``join_by`` with custom r1postfix/r2postfix suffixes for fields
    whose names are duplicated between the two inputs."""

    @classmethod
    def setup(cls):
        # Both arrays have the same 'a' keys (0..9), so every row joins,
        # and 'b' exists in both and must be disambiguated by the postfixes.
        cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
                                  np.arange(100, 110))),
                         dtype=[('a', int), ('b', int), ('c', int)])
        cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
                                  np.arange(100, 110))),
                         dtype=[('a', int), ('b', int), ('d', int)])

    def test_no_r1postfix(self):
        # Basic test of join_by no_r1postfix: left 'b' keeps its name,
        # right 'b' becomes 'b2'.
        a, b = self.a, self.b

        test = join_by(
            'a', a, b, r1postfix='', r2postfix='2', jointype='inner')
        control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                            (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                            (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                            (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                            (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                           dtype=[('a', int), ('b', int), ('b2', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_no_postfix(self):
        # Both postfixes empty would produce two identically named fields,
        # so join_by must raise ValueError.
        assert_raises(ValueError, join_by, 'a', self.a, self.b,
                      r1postfix='', r2postfix='')

    def test_no_r2postfix(self):
        # Basic test of join_by no_r2postfix: left 'b' becomes 'b1',
        # right 'b' keeps its name.
        a, b = self.a, self.b
        test = join_by(
            'a', a, b, r1postfix='1', r2postfix='', jointype='inner')
        control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                            (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                            (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                            (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                            (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                           dtype=[('a', int), ('b1', int), ('b', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_two_keys_two_vars(self):
        # Join on two keys ('a', 'k') with two duplicated non-key names.
        a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(50, 60), np.arange(10, 20))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])

        b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(65, 75), np.arange(0, 10))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])

        control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
                            (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
                            (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
                            (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
                            (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
                           dtype=[('k', int), ('a', int), ('b1', int),
                                  ('b2', int), ('c1', int), ('c2', int)])
        test = join_by(
            ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
        assert_equal(test.dtype, control.dtype)
        assert_equal(test, control)
class TestAppendFieldsObj(object):
    """Regression test for append_fields on object-dtype columns.

    See https://github.com/numpy/numpy/issues/2346.
    """

    def setup(self):
        """Stash a datetime.date instance to use as an object-dtype value."""
        from datetime import date
        self.data = dict(obj=date(2000, 1, 1))

    def test_append_to_objects(self):
        """Appending an int column to an (object, float) base keeps values."""
        marker = self.data['obj']
        base = np.array([(marker, 1.), (marker, 2.)],
                        dtype=[('A', object), ('B', float)])
        extra = np.array([10, 20], dtype=int)
        result = append_fields(base, 'C', data=extra, usemask=False)
        expected = np.array([(marker, 1.0, 10), (marker, 2.0, 20)],
                            dtype=[('A', object), ('B', float), ('C', int)])
        assert_equal(result, expected)
| true | true |
f713fcb4e6bd2a51f7ae62f8c0d54992a88852ed | 4,771 | py | Python | capture/noworkflow/now/persistence/models/diff.py | raffaelfoidl/noworkflow | aa4ca189df24fec6c7abd32bcca6a097b21fdf31 | [
"MIT"
] | 108 | 2015-02-04T14:16:51.000Z | 2022-03-06T13:52:45.000Z | capture/noworkflow/now/persistence/models/diff.py | raffaelfoidl/noworkflow | aa4ca189df24fec6c7abd32bcca6a097b21fdf31 | [
"MIT"
] | 92 | 2015-01-19T14:58:06.000Z | 2021-04-19T17:28:50.000Z | capture/noworkflow/now/persistence/models/diff.py | raffaelfoidl/noworkflow | aa4ca189df24fec6c7abd32bcca6a097b21fdf31 | [
"MIT"
] | 31 | 2015-03-03T23:53:59.000Z | 2021-11-11T04:23:44.000Z | # Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Diff Object"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from collections import OrderedDict
from future.utils import viewkeys
from .base import Model, proxy_gen
from .trial import Trial
from .graphs.diff_graph import DiffGraph
class Diff(Model):
    """This model represents a diff between two trials

    Initialize it by passing both trials ids:
        diff = Diff(1, 2)

    There are four visualization modes for the graph:
        tree: activation tree without any filters
            diff.graph.mode = 0
        no match: tree transformed into a graph by the addition of sequence and
            return edges and removal of intermediate call edges
            diff.graph.mode = 1
        exact match: calls are only combined when all the sub-call match
            diff.graph.mode = 2
        namespace: calls are combined without considering the sub-calls
            diff.graph.mode = 3

    You can change the graph width and height by the variables:
        diff.graph.width = 600
        diff.graph.height = 400
    """

    __modelname__ = "Diff"

    # Default graph rendering options (mode 3 = namespace; see docstring).
    DEFAULT = {
        "graph.width": 500,
        "graph.height": 500,
        "graph.mode": 3,
        "graph.time_limit": None,
    }

    # Maps flat keyword-argument names to their dotted option names.
    REPLACE = {
        "graph_width": "graph.width",
        "graph_height": "graph.height",
        "graph_mode": "graph.mode",
        "graph_time_limit": "graph.time_limit",
    }

    def __init__(self, trial_ref1, trial_ref2, **kwargs):
        super(Diff, self).__init__(trial_ref1, trial_ref2, **kwargs)
        self.trial1 = Trial(trial_ref1)
        self.trial2 = Trial(trial_ref2)
        self.graph = DiffGraph(self)
        self.initialize_default(kwargs)

    @property
    def trial(self):
        """Return a dict of trial attributes that differ between both trials,
        mapping each attribute to [value1, value2] (see ``diff_dict``)."""
        extra = ("start", "finish", "duration_text")
        ignore = ("id",)
        return diff_dict(
            self.trial1.to_dict(ignore=ignore, extra=extra),  # pylint: disable=no-member
            self.trial2.to_dict(ignore=ignore, extra=extra))  # pylint: disable=no-member

    @property
    def modules(self):
        """Diff modules from trials as (added, removed, replaced) sets"""
        return diff_set(
            set(proxy_gen(self.trial1.modules)),
            set(proxy_gen(self.trial2.modules)))

    @property
    def environment(self):
        """Diff environment variables as (added, removed, replaced) sets"""
        return diff_set(
            set(self.trial1.environment_attrs),
            set(self.trial2.environment_attrs))

    @property
    def file_accesses(self):
        """Diff file accesses as (added, removed, replaced) sets;
        replacements are not computed for accesses (create_replaced=False)"""
        return diff_set(
            set(self.trial1.file_accesses),
            set(self.trial2.file_accesses),
            create_replaced=False)

    def _ipython_display_(self):
        """Display the diff graph in IPython, falling back to a plain-text
        label when no graph is available"""
        if hasattr(self, "graph"):
            # pylint: disable=protected-access
            return self.graph._ipython_display_()
        from IPython.display import display
        display({
            'text/plain': 'Diff {}:{}'.format(
                self.trial1.id,
                self.trial2.id
            )
        })
def diff_dict(before, after):
    """Compare dicts.

    Return an OrderedDict with the keys (other than "id") that are present
    in both dicts and map to different values:
        key -> [before[key], after[key]]

    Keys present in only one of the dicts are skipped, so mismatched
    schemas no longer raise KeyError (the docstring always promised
    "keys shared by both dicts"; the old code crashed on extra keys).
    """
    result = OrderedDict()
    for key in before:
        # "id" is expected to differ between trials; keys missing from
        # `after` cannot be compared -- skip both.
        if key == "id" or key not in after:
            continue
        if before[key] != after[key]:
            result[key] = [before[key], after[key]]
    return result
def diff_set(before, after, create_replaced=True):
    """Compare sets to get additions, removals and replacements

    Return 3 sets:
    added -- objects present in second set, but not present in first set
    removed -- objects present in first set, but not present in second set
    replaced -- objects that have the same name in both sets, but are different
    """
    removed = before - after
    added = after - before
    replaced = set()
    if create_replaced:
        # Index the removed elements by their name so each addition can be
        # matched against the element it replaces.
        by_name = {element.name: element for element in removed}
        for new_element in added:
            old_element = by_name.get(new_element.name)
            if old_element:
                replaced.add((old_element, new_element))
        # Replaced pairs are reported only as replacements, not as both an
        # addition and a removal.
        for old_element, new_element in replaced:
            removed.discard(old_element)
            added.discard(new_element)
    return (added, removed, replaced)
| 32.678082 | 108 | 0.627961 |
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from collections import OrderedDict
from future.utils import viewkeys
from .base import Model, proxy_gen
from .trial import Trial
from .graphs.diff_graph import DiffGraph
class Diff(Model):
    """Model of a diff between two trials; create with ``Diff(id1, id2)``.

    ``graph`` holds the diff visualization; its mode/size can be tuned via
    the keys listed in DEFAULT (or the flat aliases in REPLACE).
    """

    __modelname__ = "Diff"

    # Default graph rendering options.
    DEFAULT = {
        "graph.width": 500,
        "graph.height": 500,
        "graph.mode": 3,
        "graph.time_limit": None,
    }

    # Maps flat keyword-argument names to their dotted option names.
    REPLACE = {
        "graph_width": "graph.width",
        "graph_height": "graph.height",
        "graph_mode": "graph.mode",
        "graph_time_limit": "graph.time_limit",
    }

    def __init__(self, trial_ref1, trial_ref2, **kwargs):
        super(Diff, self).__init__(trial_ref1, trial_ref2, **kwargs)
        self.trial1 = Trial(trial_ref1)
        self.trial2 = Trial(trial_ref2)
        self.graph = DiffGraph(self)
        self.initialize_default(kwargs)

    @property
    def trial(self):
        """Return a dict of trial attributes that differ between both
        trials, mapping each to [value1, value2] (see ``diff_dict``)."""
        extra = ("start", "finish", "duration_text")
        ignore = ("id",)
        return diff_dict(
            self.trial1.to_dict(ignore=ignore, extra=extra),
            self.trial2.to_dict(ignore=ignore, extra=extra))

    @property
    def modules(self):
        """Diff modules from both trials as (added, removed, replaced)."""
        return diff_set(
            set(proxy_gen(self.trial1.modules)),
            set(proxy_gen(self.trial2.modules)))

    @property
    def environment(self):
        """Diff environment variables as (added, removed, replaced)."""
        return diff_set(
            set(self.trial1.environment_attrs),
            set(self.trial2.environment_attrs))

    @property
    def file_accesses(self):
        """Diff file accesses; replacements are not computed."""
        return diff_set(
            set(self.trial1.file_accesses),
            set(self.trial2.file_accesses),
            create_replaced=False)

    def _ipython_display_(self):
        """Display the diff graph in IPython, or a plain-text fallback."""
        if hasattr(self, "graph"):
            return self.graph._ipython_display_()
        from IPython.display import display
        display({
            'text/plain': 'Diff {}:{}'.format(
                self.trial1.id,
                self.trial2.id
            )
        })
def diff_dict(before, after):
    """Return an OrderedDict of the keys (other than "id") whose values
    differ between *before* and *after*: key -> [before[key], after[key]].

    NOTE(review): assumes every key of *before* also exists in *after*;
    a key missing from *after* raises KeyError -- confirm against callers.
    """
    result = OrderedDict()
    for key in viewkeys(before):
        if key != "id" and before[key] != after[key]:
            result[key] = [before[key], after[key]]
    return result
def diff_set(before, after, create_replaced=True):
    """Compare two sets of named objects.

    Return (added, removed, replaced):
        added -- objects only present in *after*
        removed -- objects only present in *before*
        replaced -- (old, new) pairs sharing a ``name`` attribute but
                    comparing unequal; only populated when
                    *create_replaced* is true, in which case the paired
                    objects are also dropped from added/removed.
    """
    removed = before - after
    added = after - before
    replaced = set()
    # Index removed elements by name so additions can be matched up.
    removed_by_name = {}
    for element_removed in removed:
        removed_by_name[element_removed.name] = element_removed
    for element_added in added:
        element_removed = removed_by_name.get(element_added.name)
        if element_removed and create_replaced:
            replaced.add((element_removed, element_added))
    if create_replaced:
        # Report replaced pairs only as replacements, not as an
        # addition plus a removal.
        for (element_removed, element_added) in replaced:
            removed.discard(element_removed)
            added.discard(element_added)
    return (added, removed, replaced)
| true | true |
f713fd3785a1602b0c5e916541eb25874c85284a | 22,928 | py | Python | lib/streamlit/app_session.py | ab077u/streamlit | 40356a941dc39411597084d733055e310e3cfcd1 | [
"Apache-2.0"
] | null | null | null | lib/streamlit/app_session.py | ab077u/streamlit | 40356a941dc39411597084d733055e310e3cfcd1 | [
"Apache-2.0"
] | null | null | null | lib/streamlit/app_session.py | ab077u/streamlit | 40356a941dc39411597084d733055e310e3cfcd1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import uuid
from enum import Enum
from typing import TYPE_CHECKING, Callable, Optional, List, Any, cast
from streamlit.uploaded_file_manager import UploadedFileManager
import tornado.ioloop
import streamlit.elements.exception as exception_utils
from streamlit import __version__, caching, config, legacy_caching, secrets
from streamlit.case_converters import to_snake_case
from streamlit.credentials import Credentials
from streamlit.in_memory_file_manager import in_memory_file_manager
from streamlit.logger import get_logger
from streamlit.metrics_util import Installation
from streamlit.proto.ClientState_pb2 import ClientState
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.GitInfo_pb2 import GitInfo
from streamlit.proto.NewSession_pb2 import Config, CustomThemeConfig, UserInfo
from streamlit.session_data import SessionData
from streamlit.script_request_queue import RerunData, ScriptRequest, ScriptRequestQueue
from streamlit.script_runner import ScriptRunner, ScriptRunnerEvent
from streamlit.watcher.local_sources_watcher import LocalSourcesWatcher
LOGGER = get_logger(__name__)
if TYPE_CHECKING:
from streamlit.state import SessionState
class AppSessionState(Enum):
    """Lifecycle state of an AppSession's script execution."""

    # No script is currently running for this session.
    APP_NOT_RUNNING = "APP_NOT_RUNNING"
    # A ScriptRunner is actively executing the session's script.
    APP_IS_RUNNING = "APP_IS_RUNNING"
    # shutdown() was requested; the session must not be used afterwards.
    SHUTDOWN_REQUESTED = "SHUTDOWN_REQUESTED"
def _generate_scriptrun_id() -> str:
"""Randomly generate a unique ID for a script execution."""
return str(uuid.uuid4())
class AppSession:
"""
Contains session data for a single "user" of an active app
(that is, a connected browser tab).
Each AppSession has its own SessionData, root DeltaGenerator, ScriptRunner,
and widget state.
An AppSession is attached to each thread involved in running its script.
"""
    def __init__(
        self,
        ioloop: tornado.ioloop.IOLoop,
        session_data: SessionData,
        uploaded_file_manager: UploadedFileManager,
        message_enqueued_callback: Optional[Callable[[], None]],
        local_sources_watcher: LocalSourcesWatcher,
    ):
        """Initialize the AppSession.

        Parameters
        ----------
        ioloop : tornado.ioloop.IOLoop
            The Tornado IOLoop that we're running within.

        session_data : SessionData
            Object storing parameters related to running a script

        uploaded_file_manager : UploadedFileManager
            The server's UploadedFileManager.

        message_enqueued_callback : Callable[[], None]
            After enqueuing a message, this callable notification will be invoked.

        local_sources_watcher: LocalSourcesWatcher
            The file watcher that lets the session know local files have changed.

        """
        # Each AppSession has a unique string ID.
        self.id = str(uuid.uuid4())

        self._ioloop = ioloop
        self._session_data = session_data
        self._uploaded_file_mgr = uploaded_file_manager
        self._message_enqueued_callback = message_enqueued_callback
        self._state = AppSessionState.APP_NOT_RUNNING

        # Need to remember the client state here because when a script reruns
        # due to the source code changing we need to pass in the previous client state.
        self._client_state = ClientState()

        # Rerun (or notify) on local source-file changes and on config
        # re-parses; both funnel through _on_source_file_changed.
        self._local_sources_watcher = local_sources_watcher
        self._local_sources_watcher.register_file_change_callback(
            self._on_source_file_changed
        )
        # force_connect=True: presumably connects even if the config was
        # already parsed -- confirm in config.on_config_parsed.
        self._stop_config_listener = config.on_config_parsed(
            self._on_source_file_changed, force_connect=True
        )

        # The script should rerun when the `secrets.toml` file has been changed.
        secrets._file_change_listener.connect(self._on_secrets_file_changed)

        self._run_on_save = config.get_option("server.runOnSave")

        # The ScriptRequestQueue is the means by which we communicate
        # with the active ScriptRunner.
        self._script_request_queue = ScriptRequestQueue()

        # The active ScriptRunner, if any (created on demand elsewhere).
        self._scriptrunner: Optional[ScriptRunner] = None

        # This needs to be lazily imported to avoid a dependency cycle.
        from streamlit.state import SessionState

        self._session_state = SessionState()

        LOGGER.debug("AppSession initialized (id=%s)", self.id)
def flush_browser_queue(self) -> List[ForwardMsg]:
"""Clear the forward message queue and return the messages it contained.
The Server calls this periodically to deliver new messages
to the browser connected to this app.
Returns
-------
list[ForwardMsg]
The messages that were removed from the queue and should
be delivered to the browser.
"""
return self._session_data.flush_browser_queue()
    def shutdown(self) -> None:
        """Shut down the AppSession.

        Releases this session's files, asks the active ScriptRunner (if any)
        to stop, and disconnects all file/config/secrets listeners.
        Idempotent: repeated calls are no-ops once SHUTDOWN_REQUESTED is set.
        It's an error to use a AppSession after it's been shut down.

        """
        if self._state != AppSessionState.SHUTDOWN_REQUESTED:
            LOGGER.debug("Shutting down (id=%s)", self.id)
            # Clear any unused session files in upload file manager and media
            # file manager
            self._uploaded_file_mgr.remove_session_files(self.id)
            in_memory_file_manager.clear_session_files(self.id)
            in_memory_file_manager.del_expired_files()

            # Shut down the ScriptRunner, if one is active.
            # self._state must not be set to SHUTDOWN_REQUESTED until
            # after this is called.
            if self._scriptrunner is not None:
                self._enqueue_script_request(ScriptRequest.SHUTDOWN)

            self._state = AppSessionState.SHUTDOWN_REQUESTED
            # Stop watching sources, config and secrets for this session.
            self._local_sources_watcher.close()
            if self._stop_config_listener is not None:
                self._stop_config_listener()
            secrets._file_change_listener.disconnect(self._on_secrets_file_changed)
def enqueue(self, msg: ForwardMsg) -> None:
"""Enqueue a new ForwardMsg to our browser queue.
This can be called on both the main thread and a ScriptRunner
run thread.
Parameters
----------
msg : ForwardMsg
The message to enqueue
"""
if not config.get_option("client.displayEnabled"):
return
self._session_data.enqueue(msg)
if self._message_enqueued_callback:
self._message_enqueued_callback()
    def handle_backmsg_exception(self, e: BaseException) -> None:
        """Handle an Exception raised while processing a BackMsg from the browser.

        The browser is reset via a fake run/stop cycle, then the exception
        itself is sent as a delta so the user can see it.
        """
        # This does a few things:
        # 1) Clears the current app in the browser.
        # 2) Marks the current app as "stopped" in the browser.
        # 3) HACK: Resets any script params that may have been broken (e.g. the
        # command-line when rerunning with wrong argv[0])
        self._on_scriptrunner_event(None, ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS)
        self._on_scriptrunner_event(None, ScriptRunnerEvent.SCRIPT_STARTED)
        self._on_scriptrunner_event(None, ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS)

        # Send the exception to the frontend as a new element.
        msg = ForwardMsg()
        exception_utils.marshall(msg.delta.new_element.exception, e)
        self.enqueue(msg)
def request_rerun(self, client_state: Optional[ClientState]) -> None:
"""Signal that we're interested in running the script.
If the script is not already running, it will be started immediately.
Otherwise, a rerun will be requested.
Parameters
----------
client_state : streamlit.proto.ClientState_pb2.ClientState | None
The ClientState protobuf to run the script with, or None
to use previous client state.
"""
if client_state:
rerun_data = RerunData(
client_state.query_string, client_state.widget_states
)
else:
rerun_data = RerunData()
self._enqueue_script_request(ScriptRequest.RERUN, rerun_data)
    @property
    def session_state(self) -> "SessionState":
        """The SessionState object holding this session's state
        (created lazily in __init__ to avoid a dependency cycle)."""
        return self._session_state
    def _on_source_file_changed(self) -> None:
        """One of our source files changed. Schedule a rerun if appropriate.

        With ``server.runOnSave`` enabled the script is rerun immediately
        (reusing the last client state); otherwise the browser is only told
        that a file changed.
        """
        if self._run_on_save:
            self.request_rerun(self._client_state)
        else:
            self._enqueue_file_change_message()
    def _on_secrets_file_changed(self, _) -> None:
        """Called when `secrets._file_change_listener` emits a Signal."""

        # NOTE: At the time of writing, this function only calls `_on_source_file_changed`.
        # The reason behind creating this function instead of just passing `_on_source_file_changed`
        # to `connect` / `disconnect` directly is that every function that is passed to `connect` / `disconnect`
        # must have at least one argument for `sender` (in this case we don't really care about it, thus `_`),
        # and introducing an unnecessary argument to `_on_source_file_changed` just for this purpose sounded finicky.
        self._on_source_file_changed()
    def _clear_queue(self) -> None:
        """Discard any ForwardMsgs still waiting in the browser queue."""
        self._session_data.clear_browser_queue()
    def _on_scriptrunner_event(
        self,
        sender: Optional[ScriptRunner],
        event: ScriptRunnerEvent,
        exception: Optional[BaseException] = None,
        client_state: Optional[ClientState] = None,
    ) -> None:
        """Called when our ScriptRunner emits an event.
        This is called from the sender ScriptRunner's script thread;
        it is *not* called on the main thread.
        Parameters
        ----------
        sender : ScriptRunner | None
            The ScriptRunner that emitted the event. This will be set to
            None when called from `handle_backmsg_exception`.
        event : ScriptRunnerEvent
            The event type.
        exception : BaseException | None
            An exception thrown during compilation. Set only for the
            SCRIPT_STOPPED_WITH_COMPILE_ERROR event.
        client_state : streamlit.proto.ClientState_pb2.ClientState | None
            The ScriptRunner's final ClientState. Set only for the
            SHUTDOWN event.
        """
        LOGGER.debug("OnScriptRunnerEvent: %s", event)
        # Snapshot the state before handling the event so we can detect a
        # running <-> not-running transition at the bottom of this method.
        prev_state = self._state
        if event == ScriptRunnerEvent.SCRIPT_STARTED:
            if self._state != AppSessionState.SHUTDOWN_REQUESTED:
                self._state = AppSessionState.APP_IS_RUNNING
            self._clear_queue()
            self._enqueue_new_session_message()
        elif (
            event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
            or event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_COMPILE_ERROR
        ):
            if self._state != AppSessionState.SHUTDOWN_REQUESTED:
                self._state = AppSessionState.APP_NOT_RUNNING
            script_succeeded = event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
            self._enqueue_script_finished_message(
                ForwardMsg.FINISHED_SUCCESSFULLY
                if script_succeeded
                else ForwardMsg.FINISHED_WITH_COMPILE_ERROR
            )
            if script_succeeded:
                # When a script completes successfully, we update our
                # LocalSourcesWatcher to account for any source code changes
                # that change which modules should be watched. (This is run on
                # the main thread, because LocalSourcesWatcher is not
                # thread safe.)
                self._ioloop.spawn_callback(
                    self._local_sources_watcher.update_watched_modules
                )
            else:
                # Compile error: marshall the exception into a ForwardMsg so
                # the client can display it.
                msg = ForwardMsg()
                exception_utils.marshall(
                    msg.session_event.script_compilation_exception, exception
                )
                self.enqueue(msg)
        elif event == ScriptRunnerEvent.SHUTDOWN:
            # When ScriptRunner shuts down, update our local reference to it,
            # and check to see if we need to spawn a new one. (This is run on
            # the main thread.)
            assert (
                client_state is not None
            ), "client_state must be set for the SHUTDOWN event"
            if self._state == AppSessionState.SHUTDOWN_REQUESTED:
                # Only clear media files if the script is done running AND the
                # session is actually shutting down.
                in_memory_file_manager.clear_session_files(self.id)
            def on_shutdown():
                # We assert above that this is non-null
                self._client_state = cast(ClientState, client_state)
                self._scriptrunner = None
                # Because a new ScriptEvent could have been enqueued while the
                # scriptrunner was shutting down, we check to see if we should
                # create a new one. (Otherwise, a newly-enqueued ScriptEvent
                # won't be processed until another event is enqueued.)
                self._maybe_create_scriptrunner()
            self._ioloop.spawn_callback(on_shutdown)
        # Send a message if our run state changed
        app_was_running = prev_state == AppSessionState.APP_IS_RUNNING
        app_is_running = self._state == AppSessionState.APP_IS_RUNNING
        if app_is_running != app_was_running:
            self._enqueue_session_state_changed_message()
def _enqueue_session_state_changed_message(self) -> None:
msg = ForwardMsg()
msg.session_state_changed.run_on_save = self._run_on_save
msg.session_state_changed.script_is_running = (
self._state == AppSessionState.APP_IS_RUNNING
)
self.enqueue(msg)
    def _enqueue_file_change_message(self) -> None:
        """Tell the client that the script changed on disk (without rerunning)."""
        LOGGER.debug("Enqueuing script_changed message (id=%s)", self.id)
        msg = ForwardMsg()
        msg.session_event.script_changed_on_disk = True
        self.enqueue(msg)
    def _enqueue_new_session_message(self) -> None:
        """Enqueue a new_session ForwardMsg describing this session and its config."""
        msg = ForwardMsg()
        msg.new_session.script_run_id = _generate_scriptrun_id()
        msg.new_session.name = self._session_data.name
        msg.new_session.main_script_path = self._session_data.main_script_path
        _populate_config_msg(msg.new_session.config)
        _populate_theme_msg(msg.new_session.custom_theme)
        # Immutable session data. We send this every time a new session is
        # started, to avoid having to track whether the client has already
        # received it. It does not change from run to run; it's up to the
        # client to perform one-time initialization only once.
        imsg = msg.new_session.initialize
        _populate_user_info_msg(imsg.user_info)
        imsg.environment_info.streamlit_version = __version__
        imsg.environment_info.python_version = ".".join(map(str, sys.version_info))
        imsg.session_state.run_on_save = self._run_on_save
        imsg.session_state.script_is_running = (
            self._state == AppSessionState.APP_IS_RUNNING
        )
        imsg.command_line = self._session_data.command_line
        imsg.session_id = self.id
        self.enqueue(msg)
    def _enqueue_script_finished_message(
        self, status: "ForwardMsg.ScriptFinishedStatus.ValueType"
    ) -> None:
        """Enqueue a script_finished ForwardMsg.
        Parameters
        ----------
        status : ForwardMsg.ScriptFinishedStatus.ValueType
            The finish status to report to the client.
        """
        msg = ForwardMsg()
        msg.script_finished = status
        self.enqueue(msg)
    def handle_git_information_request(self) -> None:
        """Send the client a git_info_changed message for the main script's repo.
        Silently does nothing (beyond a debug log) if Git is unavailable or the
        script is not in a repository.
        """
        msg = ForwardMsg()
        try:
            # Imported here rather than at module top so that any import/Git
            # failure is caught by the except clause below.
            from streamlit.git_util import GitRepo
            repo = GitRepo(self._session_data.main_script_path)
            repo_info = repo.get_repo_info()
            if repo_info is None:
                return
            repository_name, branch, module = repo_info
            msg.git_info_changed.repository = repository_name
            msg.git_info_changed.branch = branch
            msg.git_info_changed.module = module
            msg.git_info_changed.untracked_files[:] = repo.untracked_files
            msg.git_info_changed.uncommitted_files[:] = repo.uncommitted_files
            if repo.is_head_detached:
                msg.git_info_changed.state = GitInfo.GitStates.HEAD_DETACHED
            elif len(repo.ahead_commits) > 0:
                msg.git_info_changed.state = GitInfo.GitStates.AHEAD_OF_REMOTE
            else:
                msg.git_info_changed.state = GitInfo.GitStates.DEFAULT
            self.enqueue(msg)
        except Exception as e:
            # Users may never even install Git in the first place, so this
            # error requires no action. It can be useful for debugging.
            LOGGER.debug("Obtaining Git information produced an error", exc_info=e)
    def handle_rerun_script_request(
        self, client_state: Optional[ClientState] = None
    ) -> None:
        """Tell the ScriptRunner to re-run its script.
        Parameters
        ----------
        client_state : streamlit.proto.ClientState_pb2.ClientState | None
            The ClientState protobuf to run the script with, or None
            to use previous client state.
        """
        # Thin wrapper around request_rerun.
        self.request_rerun(client_state)
    def handle_stop_script_request(self) -> None:
        """Tell the ScriptRunner to stop running its script.
        This only enqueues a STOP request; it returns immediately rather than
        blocking until the script actually stops.
        """
        self._enqueue_script_request(ScriptRequest.STOP)
    def handle_clear_cache_request(self) -> None:
        """Clear this app's cache.
        Because this cache is global, it will be cleared for all users.
        """
        # Clear the legacy cache plus the newer memo/singleton caches, then
        # wipe this session's state.
        legacy_caching.clear_cache()
        caching.memo.clear()
        caching.singleton.clear()
        self._session_state.clear_state()
    def handle_set_run_on_save_request(self, new_value: bool) -> None:
        """Change our run_on_save flag to the given value.
        The browser will be notified of the change.
        Parameters
        ----------
        new_value : bool
            New run_on_save value
        """
        # Persist the flag, then broadcast the updated session state.
        self._run_on_save = new_value
        self._enqueue_session_state_changed_message()
def _enqueue_script_request(self, request: ScriptRequest, data: Any = None) -> None:
"""Enqueue a ScriptEvent into our ScriptEventQueue.
If a script thread is not already running, one will be created
to handle the event.
Parameters
----------
request : ScriptRequest
The type of request.
data : Any
Data associated with the request, if any.
"""
if self._state == AppSessionState.SHUTDOWN_REQUESTED:
LOGGER.warning("Discarding %s request after shutdown" % request)
return
self._script_request_queue.enqueue(request, data)
self._maybe_create_scriptrunner()
    def _maybe_create_scriptrunner(self) -> None:
        """Create a new ScriptRunner if we have unprocessed script requests.
        This is called every time a ScriptRequest is enqueued, and also
        after a ScriptRunner shuts down, in case new requests were enqueued
        during its termination.
        This function should only be called on the main thread.
        """
        if (
            self._state == AppSessionState.SHUTDOWN_REQUESTED
            or self._scriptrunner is not None
            or not self._script_request_queue.has_request
        ):
            # Nothing to do: we're shutting down, a runner already exists,
            # or there are no pending requests.
            return
        # Create the ScriptRunner, attach event handlers, and start it
        self._scriptrunner = ScriptRunner(
            session_id=self.id,
            session_data=self._session_data,
            enqueue_forward_msg=self.enqueue,
            client_state=self._client_state,
            request_queue=self._script_request_queue,
            session_state=self._session_state,
            uploaded_file_mgr=self._uploaded_file_mgr,
        )
        self._scriptrunner.on_event.connect(self._on_scriptrunner_event)
        self._scriptrunner.start()
def _populate_config_msg(msg: Config) -> None:
    """Copy the client-relevant config options into the given Config proto."""
    msg.gather_usage_stats = config.get_option("browser.gatherUsageStats")
    msg.max_cached_message_age = config.get_option("global.maxCachedMessageAge")
    msg.mapbox_token = config.get_option("mapbox.token")
    msg.allow_run_on_save = config.get_option("server.allowRunOnSave")
    msg.hide_top_bar = config.get_option("ui.hideTopBar")
def _populate_theme_msg(msg: CustomThemeConfig) -> None:
    """Copy the [theme] config section into the given CustomThemeConfig proto."""
    enum_encoded_options = {"base", "font"}
    theme_opts = config.get_options_for_section("theme")
    if not any(theme_opts.values()):
        # No theme options were set; leave the proto at its defaults.
        return
    # Plain (non-enum) options are copied straight onto the proto.
    for option_name, option_val in theme_opts.items():
        if option_name not in enum_encoded_options and option_val is not None:
            setattr(msg, to_snake_case(option_name), option_val)
    # NOTE: If unset, base and font will default to the protobuf enum zero
    # values, which are BaseTheme.LIGHT and FontFamily.SANS_SERIF,
    # respectively. This is why we both don't handle the cases explicitly and
    # also only log a warning when receiving invalid base/font options.
    def set_enum_option(attr_name, value, mapping, fallback_label):
        # Shared handling for the two enum-encoded options.
        if value is None:
            return
        if value in mapping:
            setattr(msg, attr_name, mapping[value])
        else:
            LOGGER.warning(
                f'"{value}" is an invalid value for theme.{attr_name}.'
                f" Allowed values include {list(mapping.keys())}."
                f' Setting theme.{attr_name} to "{fallback_label}".'
            )
    set_enum_option(
        "base",
        theme_opts["base"],
        {"light": msg.BaseTheme.LIGHT, "dark": msg.BaseTheme.DARK},
        "light",
    )
    set_enum_option(
        "font",
        theme_opts["font"],
        {
            "sans serif": msg.FontFamily.SANS_SERIF,
            "serif": msg.FontFamily.SERIF,
            "monospace": msg.FontFamily.MONOSPACE,
        },
        "sans serif",
    )
def _populate_user_info_msg(msg: UserInfo) -> None:
    """Fill in the UserInfo proto from installation and credential state."""
    # Hoist the repeated singleton lookups out of the field assignments.
    installation = Installation.instance()
    msg.installation_id = installation.installation_id
    msg.installation_id_v3 = installation.installation_id_v3
    # Only activated installations have an email on record.
    activation = Credentials.get_current().activation
    msg.email = activation.email if activation else ""
| 37.648604 | 117 | 0.664341 |
import sys
import uuid
from enum import Enum
from typing import TYPE_CHECKING, Callable, Optional, List, Any, cast
from streamlit.uploaded_file_manager import UploadedFileManager
import tornado.ioloop
import streamlit.elements.exception as exception_utils
from streamlit import __version__, caching, config, legacy_caching, secrets
from streamlit.case_converters import to_snake_case
from streamlit.credentials import Credentials
from streamlit.in_memory_file_manager import in_memory_file_manager
from streamlit.logger import get_logger
from streamlit.metrics_util import Installation
from streamlit.proto.ClientState_pb2 import ClientState
from streamlit.proto.ForwardMsg_pb2 import ForwardMsg
from streamlit.proto.GitInfo_pb2 import GitInfo
from streamlit.proto.NewSession_pb2 import Config, CustomThemeConfig, UserInfo
from streamlit.session_data import SessionData
from streamlit.script_request_queue import RerunData, ScriptRequest, ScriptRequestQueue
from streamlit.script_runner import ScriptRunner, ScriptRunnerEvent
from streamlit.watcher.local_sources_watcher import LocalSourcesWatcher
LOGGER = get_logger(__name__)
if TYPE_CHECKING:
from streamlit.state import SessionState
class AppSessionState(Enum):
APP_NOT_RUNNING = "APP_NOT_RUNNING"
APP_IS_RUNNING = "APP_IS_RUNNING"
SHUTDOWN_REQUESTED = "SHUTDOWN_REQUESTED"
def _generate_scriptrun_id() -> str:
return str(uuid.uuid4())
class AppSession:
def __init__(
self,
ioloop: tornado.ioloop.IOLoop,
session_data: SessionData,
uploaded_file_manager: UploadedFileManager,
message_enqueued_callback: Optional[Callable[[], None]],
local_sources_watcher: LocalSourcesWatcher,
):
self.id = str(uuid.uuid4())
self._ioloop = ioloop
self._session_data = session_data
self._uploaded_file_mgr = uploaded_file_manager
self._message_enqueued_callback = message_enqueued_callback
self._state = AppSessionState.APP_NOT_RUNNING
self._client_state = ClientState()
self._local_sources_watcher = local_sources_watcher
self._local_sources_watcher.register_file_change_callback(
self._on_source_file_changed
)
self._stop_config_listener = config.on_config_parsed(
self._on_source_file_changed, force_connect=True
)
secrets._file_change_listener.connect(self._on_secrets_file_changed)
self._run_on_save = config.get_option("server.runOnSave")
self._script_request_queue = ScriptRequestQueue()
self._scriptrunner: Optional[ScriptRunner] = None
from streamlit.state import SessionState
self._session_state = SessionState()
LOGGER.debug("AppSession initialized (id=%s)", self.id)
def flush_browser_queue(self) -> List[ForwardMsg]:
return self._session_data.flush_browser_queue()
def shutdown(self) -> None:
if self._state != AppSessionState.SHUTDOWN_REQUESTED:
LOGGER.debug("Shutting down (id=%s)", self.id)
self._uploaded_file_mgr.remove_session_files(self.id)
in_memory_file_manager.clear_session_files(self.id)
in_memory_file_manager.del_expired_files()
if self._scriptrunner is not None:
self._enqueue_script_request(ScriptRequest.SHUTDOWN)
self._state = AppSessionState.SHUTDOWN_REQUESTED
self._local_sources_watcher.close()
if self._stop_config_listener is not None:
self._stop_config_listener()
secrets._file_change_listener.disconnect(self._on_secrets_file_changed)
def enqueue(self, msg: ForwardMsg) -> None:
if not config.get_option("client.displayEnabled"):
return
self._session_data.enqueue(msg)
if self._message_enqueued_callback:
self._message_enqueued_callback()
def handle_backmsg_exception(self, e: BaseException) -> None:
self._on_scriptrunner_event(None, ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS)
self._on_scriptrunner_event(None, ScriptRunnerEvent.SCRIPT_STARTED)
self._on_scriptrunner_event(None, ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS)
msg = ForwardMsg()
exception_utils.marshall(msg.delta.new_element.exception, e)
self.enqueue(msg)
def request_rerun(self, client_state: Optional[ClientState]) -> None:
if client_state:
rerun_data = RerunData(
client_state.query_string, client_state.widget_states
)
else:
rerun_data = RerunData()
self._enqueue_script_request(ScriptRequest.RERUN, rerun_data)
@property
def session_state(self) -> "SessionState":
return self._session_state
def _on_source_file_changed(self) -> None:
if self._run_on_save:
self.request_rerun(self._client_state)
else:
self._enqueue_file_change_message()
def _on_secrets_file_changed(self, _) -> None:
# and introducing an unnecessary argument to `_on_source_file_changed` just for this purpose sounded finicky.
self._on_source_file_changed()
def _clear_queue(self) -> None:
self._session_data.clear_browser_queue()
def _on_scriptrunner_event(
self,
sender: Optional[ScriptRunner],
event: ScriptRunnerEvent,
exception: Optional[BaseException] = None,
client_state: Optional[ClientState] = None,
) -> None:
LOGGER.debug("OnScriptRunnerEvent: %s", event)
prev_state = self._state
if event == ScriptRunnerEvent.SCRIPT_STARTED:
if self._state != AppSessionState.SHUTDOWN_REQUESTED:
self._state = AppSessionState.APP_IS_RUNNING
self._clear_queue()
self._enqueue_new_session_message()
elif (
event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
or event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_COMPILE_ERROR
):
if self._state != AppSessionState.SHUTDOWN_REQUESTED:
self._state = AppSessionState.APP_NOT_RUNNING
script_succeeded = event == ScriptRunnerEvent.SCRIPT_STOPPED_WITH_SUCCESS
self._enqueue_script_finished_message(
ForwardMsg.FINISHED_SUCCESSFULLY
if script_succeeded
else ForwardMsg.FINISHED_WITH_COMPILE_ERROR
)
if script_succeeded:
# When a script completes successfully, we update our
# LocalSourcesWatcher to account for any source code changes
# that change which modules should be watched. (This is run on
# the main thread, because LocalSourcesWatcher is not
# thread safe.)
self._ioloop.spawn_callback(
self._local_sources_watcher.update_watched_modules
)
else:
msg = ForwardMsg()
exception_utils.marshall(
msg.session_event.script_compilation_exception, exception
)
self.enqueue(msg)
elif event == ScriptRunnerEvent.SHUTDOWN:
# When ScriptRunner shuts down, update our local reference to it,
# and check to see if we need to spawn a new one. (This is run on
# the main thread.)
assert (
client_state is not None
), "client_state must be set for the SHUTDOWN event"
if self._state == AppSessionState.SHUTDOWN_REQUESTED:
# Only clear media files if the script is done running AND the
# session is actually shutting down.
in_memory_file_manager.clear_session_files(self.id)
def on_shutdown():
# We assert above that this is non-null
self._client_state = cast(ClientState, client_state)
self._scriptrunner = None
# Because a new ScriptEvent could have been enqueued while the
# scriptrunner was shutting down, we check to see if we should
# create a new one. (Otherwise, a newly-enqueued ScriptEvent
# won't be processed until another event is enqueued.)
self._maybe_create_scriptrunner()
self._ioloop.spawn_callback(on_shutdown)
app_was_running = prev_state == AppSessionState.APP_IS_RUNNING
app_is_running = self._state == AppSessionState.APP_IS_RUNNING
if app_is_running != app_was_running:
self._enqueue_session_state_changed_message()
def _enqueue_session_state_changed_message(self) -> None:
msg = ForwardMsg()
msg.session_state_changed.run_on_save = self._run_on_save
msg.session_state_changed.script_is_running = (
self._state == AppSessionState.APP_IS_RUNNING
)
self.enqueue(msg)
def _enqueue_file_change_message(self) -> None:
LOGGER.debug("Enqueuing script_changed message (id=%s)", self.id)
msg = ForwardMsg()
msg.session_event.script_changed_on_disk = True
self.enqueue(msg)
def _enqueue_new_session_message(self) -> None:
msg = ForwardMsg()
msg.new_session.script_run_id = _generate_scriptrun_id()
msg.new_session.name = self._session_data.name
msg.new_session.main_script_path = self._session_data.main_script_path
_populate_config_msg(msg.new_session.config)
_populate_theme_msg(msg.new_session.custom_theme)
# to perform one-time initialization only once.
imsg = msg.new_session.initialize
_populate_user_info_msg(imsg.user_info)
imsg.environment_info.streamlit_version = __version__
imsg.environment_info.python_version = ".".join(map(str, sys.version_info))
imsg.session_state.run_on_save = self._run_on_save
imsg.session_state.script_is_running = (
self._state == AppSessionState.APP_IS_RUNNING
)
imsg.command_line = self._session_data.command_line
imsg.session_id = self.id
self.enqueue(msg)
def _enqueue_script_finished_message(
self, status: "ForwardMsg.ScriptFinishedStatus.ValueType"
) -> None:
msg = ForwardMsg()
msg.script_finished = status
self.enqueue(msg)
def handle_git_information_request(self) -> None:
msg = ForwardMsg()
try:
from streamlit.git_util import GitRepo
repo = GitRepo(self._session_data.main_script_path)
repo_info = repo.get_repo_info()
if repo_info is None:
return
repository_name, branch, module = repo_info
msg.git_info_changed.repository = repository_name
msg.git_info_changed.branch = branch
msg.git_info_changed.module = module
msg.git_info_changed.untracked_files[:] = repo.untracked_files
msg.git_info_changed.uncommitted_files[:] = repo.uncommitted_files
if repo.is_head_detached:
msg.git_info_changed.state = GitInfo.GitStates.HEAD_DETACHED
elif len(repo.ahead_commits) > 0:
msg.git_info_changed.state = GitInfo.GitStates.AHEAD_OF_REMOTE
else:
msg.git_info_changed.state = GitInfo.GitStates.DEFAULT
self.enqueue(msg)
except Exception as e:
# Users may never even install Git in the first place, so this
# error requires no action. It can be useful for debugging.
LOGGER.debug("Obtaining Git information produced an error", exc_info=e)
def handle_rerun_script_request(
self, client_state: Optional[ClientState] = None
) -> None:
self.request_rerun(client_state)
def handle_stop_script_request(self) -> None:
self._enqueue_script_request(ScriptRequest.STOP)
def handle_clear_cache_request(self) -> None:
legacy_caching.clear_cache()
caching.memo.clear()
caching.singleton.clear()
self._session_state.clear_state()
def handle_set_run_on_save_request(self, new_value: bool) -> None:
self._run_on_save = new_value
self._enqueue_session_state_changed_message()
def _enqueue_script_request(self, request: ScriptRequest, data: Any = None) -> None:
if self._state == AppSessionState.SHUTDOWN_REQUESTED:
LOGGER.warning("Discarding %s request after shutdown" % request)
return
self._script_request_queue.enqueue(request, data)
self._maybe_create_scriptrunner()
def _maybe_create_scriptrunner(self) -> None:
if (
self._state == AppSessionState.SHUTDOWN_REQUESTED
or self._scriptrunner is not None
or not self._script_request_queue.has_request
):
return
# Create the ScriptRunner, attach event handlers, and start it
self._scriptrunner = ScriptRunner(
session_id=self.id,
session_data=self._session_data,
enqueue_forward_msg=self.enqueue,
client_state=self._client_state,
request_queue=self._script_request_queue,
session_state=self._session_state,
uploaded_file_mgr=self._uploaded_file_mgr,
)
self._scriptrunner.on_event.connect(self._on_scriptrunner_event)
self._scriptrunner.start()
def _populate_config_msg(msg: Config) -> None:
msg.gather_usage_stats = config.get_option("browser.gatherUsageStats")
msg.max_cached_message_age = config.get_option("global.maxCachedMessageAge")
msg.mapbox_token = config.get_option("mapbox.token")
msg.allow_run_on_save = config.get_option("server.allowRunOnSave")
msg.hide_top_bar = config.get_option("ui.hideTopBar")
def _populate_theme_msg(msg: CustomThemeConfig) -> None:
enum_encoded_options = {"base", "font"}
theme_opts = config.get_options_for_section("theme")
if not any(theme_opts.values()):
return
for option_name, option_val in theme_opts.items():
if option_name not in enum_encoded_options and option_val is not None:
setattr(msg, to_snake_case(option_name), option_val)
# NOTE: If unset, base and font will default to the protobuf enum zero
# values, which are BaseTheme.LIGHT and FontFamily.SANS_SERIF,
# respectively. This is why we both don't handle the cases explicitly and
base_map = {
"light": msg.BaseTheme.LIGHT,
"dark": msg.BaseTheme.DARK,
}
base = theme_opts["base"]
if base is not None:
if base not in base_map:
LOGGER.warning(
f'"{base}" is an invalid value for theme.base.'
f" Allowed values include {list(base_map.keys())}."
' Setting theme.base to "light".'
)
else:
msg.base = base_map[base]
font_map = {
"sans serif": msg.FontFamily.SANS_SERIF,
"serif": msg.FontFamily.SERIF,
"monospace": msg.FontFamily.MONOSPACE,
}
font = theme_opts["font"]
if font is not None:
if font not in font_map:
LOGGER.warning(
f'"{font}" is an invalid value for theme.font.'
f" Allowed values include {list(font_map.keys())}."
' Setting theme.font to "sans serif".'
)
else:
msg.font = font_map[font]
def _populate_user_info_msg(msg: UserInfo) -> None:
msg.installation_id = Installation.instance().installation_id
msg.installation_id_v3 = Installation.instance().installation_id_v3
if Credentials.get_current().activation:
msg.email = Credentials.get_current().activation.email
else:
msg.email = ""
| true | true |
f713fedde4b97aea695b77b89fd9f2e970232bca | 435 | py | Python | Lista4/Lista4ex7.py | hugo-paiva/IntroducaoCienciasDaComputacao | a563f2fd5b773acbffaf4c858b86423b1130ae1f | [
"MIT"
] | null | null | null | Lista4/Lista4ex7.py | hugo-paiva/IntroducaoCienciasDaComputacao | a563f2fd5b773acbffaf4c858b86423b1130ae1f | [
"MIT"
] | null | null | null | Lista4/Lista4ex7.py | hugo-paiva/IntroducaoCienciasDaComputacao | a563f2fd5b773acbffaf4c858b86423b1130ae1f | [
"MIT"
] | null | null | null | def concat(s1, s2):
if not s1:
return s2
return s1[0:1] + concat(s1[1:], s2)
def reverse(s1):
    """Return s1 reversed, built recursively."""
    if s1 == "":
        return s1
    # Reverse the tail, then append the old first character at the end
    # (the original delegated this concatenation to the concat helper).
    return reverse(s1[1:]) + s1[0]
def prefix(s1, s2):
    """Return True if s1 is a prefix of s2, checked recursively.

    Bug fix: the original only returned True for `s1 == '' and s2 != ''`, so
    the empty/empty case fell through to `s1[:1] == s2[:1]` (i.e. '' == ''),
    recursing forever. Any call where s1 == s2 (e.g. prefix("ab", "ab"))
    eventually reached that case and raised RecursionError. The empty string
    is a prefix of every string, including the empty string, so we return
    True as soon as s1 is exhausted.
    """
    if s1 == '':
        return True
    if s1[:1] == s2[:1]:
        # First characters match (and s1 is non-empty): compare the tails.
        return prefix(s1[1:], s2[1:])
    return False
# Read the two input strings, then exercise each recursive helper.
s1 = input()
s2 = input()
print(concat(s1, s2))
print(reverse(s1))
print(prefix(s1, s2))
| 16.111111 | 41 | 0.524138 | def concat(s1, s2):
if not s1:
return s2
return s1[0:1] + concat(s1[1:], s2)
def reverse(s1):
    """Return s1 reversed, built recursively."""
    if s1 == "":
        return s1
    # Reverse the tail, then append the old first character at the end
    # (the original delegated this concatenation to the concat helper).
    return reverse(s1[1:]) + s1[0]
def prefix(s1, s2):
    """Return True if s1 is a prefix of s2, checked recursively.

    Bug fix: the original only returned True for `s1 == '' and s2 != ''`,
    so the empty/empty case fell through to `'' == ''` and recursed forever
    (RecursionError whenever s1 == s2). An empty s1 is a prefix of any
    string, so return True as soon as s1 is exhausted.
    """
    if s1 == '':
        return True
    if s1[:1] == s2[:1]:
        return prefix(s1[1:], s2[1:])
    return False
s1 = input()
s2 = input()
print(concat(s1, s2))
print(reverse(s1))
print(prefix(s1, s2))
| true | true |
f713feed6e11f6d272abab6a55db7e1b8686813b | 4,620 | py | Python | env/lib/python3.8/site-packages/ask_sdk_model/interfaces/audioplayer/stream.py | adamash99/alexa-play-pot-of-greed | dc2d18dae55692a4bf1becb72685a5777870c643 | [
"MIT"
] | 90 | 2018-09-19T21:56:42.000Z | 2022-03-30T11:25:21.000Z | ask-sdk-model/ask_sdk_model/interfaces/audioplayer/stream.py | ishitaojha/alexa-apis-for-python | a68f94b7a0e41f819595d6fe56e800403e8a4194 | [
"Apache-2.0"
] | 11 | 2018-09-23T12:16:48.000Z | 2021-06-10T19:49:45.000Z | ask-sdk-model/ask_sdk_model/interfaces/audioplayer/stream.py | ishitaojha/alexa-apis-for-python | a68f94b7a0e41f819595d6fe56e800403e8a4194 | [
"Apache-2.0"
] | 28 | 2018-09-19T22:30:38.000Z | 2022-02-22T22:57:07.000Z | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
from ask_sdk_model.interfaces.audioplayer.caption_data import CaptionData as CaptionData_e119f120
class Stream(object):
    """An AudioPlayer stream (audio URL, token, offset, and captions).
    :param expected_previous_token:
    :type expected_previous_token: (optional) str
    :param token:
    :type token: (optional) str
    :param url:
    :type url: (optional) str
    :param offset_in_milliseconds:
    :type offset_in_milliseconds: (optional) int
    :param caption_data:
    :type caption_data: (optional) ask_sdk_model.interfaces.audioplayer.caption_data.CaptionData
    """
    # Maps attribute name -> type string used by the SDK (de)serializer.
    deserialized_types = {
        'expected_previous_token': 'str',
        'token': 'str',
        'url': 'str',
        'offset_in_milliseconds': 'int',
        'caption_data': 'ask_sdk_model.interfaces.audioplayer.caption_data.CaptionData'
    } # type: Dict
    # Maps python attribute name -> wire (JSON) key name.
    attribute_map = {
        'expected_previous_token': 'expectedPreviousToken',
        'token': 'token',
        'url': 'url',
        'offset_in_milliseconds': 'offsetInMilliseconds',
        'caption_data': 'captionData'
    } # type: Dict
    supports_multiple_types = False
    def __init__(self, expected_previous_token=None, token=None, url=None, offset_in_milliseconds=None, caption_data=None):
        # type: (Optional[str], Optional[str], Optional[str], Optional[int], Optional[CaptionData_e119f120]) -> None
        """
        :param expected_previous_token:
        :type expected_previous_token: (optional) str
        :param token:
        :type token: (optional) str
        :param url:
        :type url: (optional) str
        :param offset_in_milliseconds:
        :type offset_in_milliseconds: (optional) int
        :param caption_data:
        :type caption_data: (optional) ask_sdk_model.interfaces.audioplayer.caption_data.CaptionData
        """
        self.__discriminator_value = None # type: str
        self.expected_previous_token = expected_previous_token
        self.token = token
        self.url = url
        self.offset_in_milliseconds = offset_in_milliseconds
        self.caption_data = caption_data
    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {} # type: Dict
        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each list element; nested models via to_dict(),
                # Enums via their .value, everything else as-is.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                # Nested model object.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values the same way list elements are handled.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, Stream):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
| 33.970588 | 123 | 0.614286 |
import pprint
import re
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
from ask_sdk_model.interfaces.audioplayer.caption_data import CaptionData as CaptionData_e119f120
class Stream(object):
deserialized_types = {
'expected_previous_token': 'str',
'token': 'str',
'url': 'str',
'offset_in_milliseconds': 'int',
'caption_data': 'ask_sdk_model.interfaces.audioplayer.caption_data.CaptionData'
}
attribute_map = {
'expected_previous_token': 'expectedPreviousToken',
'token': 'token',
'url': 'url',
'offset_in_milliseconds': 'offsetInMilliseconds',
'caption_data': 'captionData'
}
supports_multiple_types = False
def __init__(self, expected_previous_token=None, token=None, url=None, offset_in_milliseconds=None, caption_data=None):
self.__discriminator_value = None
self.expected_previous_token = expected_previous_token
self.token = token
self.url = url
self.offset_in_milliseconds = offset_in_milliseconds
self.caption_data = caption_data
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Stream):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f713feeff5540cb5abd88be8581806ea4a417b09 | 14,614 | py | Python | oscar/lib/python2.7/site-packages/IPython/core/pylabtools.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/IPython/core/pylabtools.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/IPython/core/pylabtools.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities."""
from __future__ import print_function
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
from IPython.utils import py3compat
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
# Short GUI name (as accepted by %matplotlib / %pylab) -> matplotlib backend.
backends = {'tk': 'TkAgg',
            'gtk': 'GTKAgg',
            'gtk3': 'GTK3Agg',
            'wx': 'WXAgg',
            'qt': 'Qt4Agg', # qt3 not supported
            'qt4': 'Qt4Agg',
            'qt5': 'Qt5Agg',
            'osx': 'MacOSX',
            'nbagg': 'nbAgg',
            'notebook': 'nbAgg',
            'agg': 'agg',
            'inline': 'module://ipykernel.pylab.backend_inline',
            'ipympl': 'module://ipympl.backend_nbagg',
            }
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend.  For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
# NOTE(review): `backends` has duplicate values ('nbAgg', 'Qt4Agg'), so which
# short name survives the zip depends on dict ordering; 'Qt4Agg' is pinned
# explicitly just below.
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['GTK3Cairo'] = 'gtk3'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
# And some backends that don't need GUI integration
del backend2gui['nbAgg']
del backend2gui['agg']
del backend2gui['module://ipykernel.pylab.backend_inline']
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
    """Return a list of matplotlib figures selected by figure number.

    With no arguments, every available figure is returned.  Invalid figure
    numbers produce a warning, but the lookup of the remaining figures
    continues.

    Parameters
    ----------
    figs : tuple
        A tuple of ints giving the figure numbers of the figures to return.
    """
    from matplotlib._pylab_helpers import Gcf
    if not fig_nums:
        # No selection: hand back every figure the manager knows about.
        return [fm.canvas.figure for fm in Gcf.get_all_fig_managers()]
    figs = []
    for num in fig_nums:
        manager = Gcf.figs.get(num)
        if manager is None:
            print('Warning: figure %s not available.' % num)
        else:
            figs.append(manager.canvas.figure)
    return figs
def figsize(sizex, sizey):
    """Set the default figure size to be [sizex, sizey].

    Convenience wrapper around assigning to
    ``matplotlib.rcParams['figure.figsize']``.
    """
    import matplotlib
    new_default = [sizex, sizey]
    matplotlib.rcParams['figure.figsize'] = new_default
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
    """Print a figure to an image, and return the resulting file data

    Returned data will be bytes unless ``fmt='svg'``,
    in which case it will be unicode.

    Any keyword args are passed to fig.canvas.print_figure,
    such as ``quality`` or ``bbox_inches``.
    """
    # When there's an empty figure, we shouldn't return anything, otherwise we
    # get big blank areas in the qt console.
    if not fig.axes and not fig.lines:
        return

    dpi = fig.dpi
    if fmt == 'retina':
        # Retina: render at double resolution but emit a regular PNG;
        # retina_figure() later halves the reported dimensions.
        dpi = dpi * 2
        fmt = 'png'

    # build keyword args
    kw = dict(
        format=fmt,
        facecolor=fig.get_facecolor(),
        edgecolor=fig.get_edgecolor(),
        dpi=dpi,
        bbox_inches=bbox_inches,
    )
    # **kwargs get higher priority
    kw.update(kwargs)

    bytes_io = BytesIO()
    fig.canvas.print_figure(bytes_io, **kw)
    data = bytes_io.getvalue()
    if fmt == 'svg':
        # SVG is textual; decode so callers receive unicode.
        data = data.decode('utf-8')
    return data
def retina_figure(fig, **kwargs):
    """Format a figure as a pixel-doubled (retina) PNG."""
    pngdata = print_figure(fig, fmt='retina', **kwargs)
    if pngdata is None:
        # Empty figure: behave exactly like print_figure.
        return None
    width, height = _pngxy(pngdata)
    # Report half the pixel dimensions so frontends render at 2x density.
    return pngdata, {'width': width // 2, 'height': height // 2}
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
    """Factory to return a matplotlib-enabled runner for %run.

    Parameters
    ----------
    safe_execfile : function
        This must be a function with the same interface as the
        :meth:`safe_execfile` method of IPython.

    Returns
    -------
    A function suitable for use as the ``runner`` argument of the %run magic
    function.
    """
    def mpl_execfile(fname, *where, **kw):
        """matplotlib-aware wrapper around safe_execfile.

        Its interface is identical to that of the :func:`execfile` builtin.

        This is ultimately a call to execfile(), but wrapped in safeties to
        properly handle interactive rendering."""
        import matplotlib
        import matplotlib.pyplot as plt

        # Suspend rendering while the script runs, then restore the user's
        # interactive setting.
        was_interactive = matplotlib.rcParams['interactive']
        matplotlib.interactive(False)
        safe_execfile(fname, *where, **kw)
        matplotlib.interactive(was_interactive)

        # If the script attempted an interactive draw, perform the deferred
        # rendering call now and reset the flag.
        if plt.draw_if_interactive.called:
            plt.draw()
            plt.draw_if_interactive.called = False

        # Re-draw anything stale; draw_all is absent in older matplotlib.
        draw_all = getattr(plt, 'draw_all', None)
        if draw_all is not None:
            draw_all()

    return mpl_execfile
def _reshow_nbagg_figure(fig):
    """Ask the nbagg backend to re-display *fig*."""
    try:
        reshow = fig.canvas.manager.reshow
    except AttributeError:
        # The active backend/manager has no reshow support.
        raise NotImplementedError()
    reshow()
def select_figure_formats(shell, formats, **kwargs):
    """Select figure formats for the inline backend.

    Parameters
    ==========
    shell : InteractiveShell
        The main IPython instance.
    formats : str or set
        One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
    **kwargs : any
        Extra keyword arguments to be passed to fig.canvas.print_figure.
    """
    import matplotlib
    from matplotlib.figure import Figure
    svg_formatter = shell.display_formatter.formatters['image/svg+xml']
    png_formatter = shell.display_formatter.formatters['image/png']
    jpg_formatter = shell.display_formatter.formatters['image/jpeg']
    pdf_formatter = shell.display_formatter.formatters['application/pdf']
    # A bare string means a single format.
    if isinstance(formats, py3compat.string_types):
        formats = {formats}
    # cast in case of list / tuple
    formats = set(formats)
    # Drop any Figure formatter registered by a previous call before
    # re-registering below.
    [ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
    mplbackend = matplotlib.get_backend().lower()
    if mplbackend == 'nbagg' or mplbackend == 'module://ipympl.backend_nbagg':
        # Live-widget backends re-show the existing figure widget instead of
        # printing a static image.
        formatter = shell.display_formatter.ipython_display_formatter
        formatter.for_type(Figure, _reshow_nbagg_figure)
    supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
    bad = formats.difference(supported)
    if bad:
        bs = "%s" % ','.join([repr(f) for f in bad])
        gs = "%s" % ','.join([repr(f) for f in supported])
        raise ValueError("supported formats are: %s not %s" % (gs, bs))
    if 'png' in formats:
        png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
    if 'retina' in formats or 'png2x' in formats:
        # Registered after 'png', so retina wins the image/png mimetype.
        png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
    if 'jpg' in formats or 'jpeg' in formats:
        jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
    if 'svg' in formats:
        svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
    if 'pdf' in formats:
        pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
    """Given a gui string return the gui and mpl backend.

    Parameters
    ----------
    gui : str
        Can be one of ('tk','gtk','wx','qt','qt4','inline').
    gui_select : str
        Can be one of ('tk','gtk','wx','qt','qt4','inline').
        This is any gui already selected by the shell.

    Returns
    -------
    A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
    'WXAgg','Qt4Agg','module://ipykernel.pylab.backend_inline').
    """
    import matplotlib

    if gui and gui != 'auto':
        # An explicit gui request determines the backend directly.
        return gui, backends[gui]

    # Otherwise fall back to the user's configured default.  Read it from
    # rcParamsOrig, *not* rcParams: a prior %matplotlib invocation may have
    # overwritten the latter.  (Assumes matplotlib 1.1 or newer.)
    backend = matplotlib.rcParamsOrig['backend']
    # Map the backend back to the gui toolkit whose inputhook IPython
    # should activate.
    gui = backend2gui.get(backend, None)
    # If a gui was already active for this shell, only that gui (or inline)
    # is allowed, so force the selection back to it.
    if gui_select and gui != gui_select:
        gui = gui_select
        backend = backends[gui]
    return gui, backend
def activate_matplotlib(backend):
    """Activate the given backend and set interactive to True."""
    import matplotlib
    matplotlib.interactive(True)
    # Matplotlib had a bug where even switch_backend could not force
    # the rcParam to update. This needs to be set *before* the module
    # magic of switch_backend().
    matplotlib.rcParams['backend'] = backend
    import matplotlib.pyplot
    matplotlib.pyplot.switch_backend(backend)
    # This must be imported last in the matplotlib series, after
    # backend/interactivity choices have been made
    import matplotlib.pyplot as plt
    # presumably tells show() not to start its own mainloop -- TODO confirm
    plt.show._needmain = False
    # We need to detect at runtime whether show() is called by the user.
    # For this, we wrap it into a decorator which adds a 'called' flag.
    plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
    """Populate the namespace with pylab-related values.

    Imports matplotlib, pylab, numpy, and everything from pylab and numpy.

    Also imports a few names from IPython (figsize, display, getfigs)
    """
    # The np/plt aliases are the conventions we're trying to standardize on;
    # making them available by default greatly helps that.
    setup_code = (
        "import numpy\n"
        "import matplotlib\n"
        "from matplotlib import pylab, mlab, pyplot\n"
        "np = numpy\n"
        "plt = pyplot\n"
    )
    exec(setup_code, user_ns)

    if import_all:
        star_code = ("from matplotlib.pylab import *\n"
                     "from numpy import *\n")
        exec(star_code, user_ns)

    # IPython-provided helpers.
    user_ns['figsize'] = figsize
    from IPython.core.display import display
    user_ns['display'] = display
    user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
    """Configure an IPython shell object for matplotlib use.

    Parameters
    ----------
    shell : InteractiveShell instance
    backend : matplotlib backend
    """
    # If using our svg payload backend, register the post-execution
    # function that will pick up the results for display. This can only be
    # done with access to the real shell object.
    # Note: if we can't load the inline backend, then there's no point
    # continuing (such as in terminal-only shells in environments without
    # zeromq available).
    try:
        from ipykernel.pylab.backend_inline import InlineBackend
    except ImportError:
        return
    import matplotlib
    cfg = InlineBackend.instance(parent=shell)
    cfg.shell = shell
    if cfg not in shell.configurables:
        shell.configurables.append(cfg)
    if backend == backends['inline']:
        from ipykernel.pylab.backend_inline import flush_figures
        shell.events.register('post_execute', flush_figures)
        # Save rcParams that will be overwritten, so they can be restored
        # when the user switches away from the inline backend.
        shell._saved_rcParams = dict()
        for k in cfg.rc:
            shell._saved_rcParams[k] = matplotlib.rcParams[k]
        # load inline_rc
        matplotlib.rcParams.update(cfg.rc)
        new_backend_name = "inline"
    else:
        from ipykernel.pylab.backend_inline import flush_figures
        try:
            shell.events.unregister('post_execute', flush_figures)
        except ValueError:
            # flush_figures was not registered (inline was never active).
            pass
        if hasattr(shell, '_saved_rcParams'):
            matplotlib.rcParams.update(shell._saved_rcParams)
            del shell._saved_rcParams
        new_backend_name = "other"
    # only enable the formats once -> don't change the enabled formats (which
    # the user may have changed) when getting another "%matplotlib inline" call.
    # See https://github.com/ipython/ipykernel/issues/29
    cur_backend = getattr(configure_inline_support, "current_backend", "unset")
    if new_backend_name != cur_backend:
        # Setup the default figure format
        select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
        configure_inline_support.current_backend = new_backend_name
| 35.643902 | 91 | 0.622212 |
from __future__ import print_function
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
from IPython.utils import py3compat
# Short GUI name (as accepted by %matplotlib / %pylab) -> matplotlib backend.
backends = {'tk': 'TkAgg',
            'gtk': 'GTKAgg',
            'gtk3': 'GTK3Agg',
            'wx': 'WXAgg',
            'qt': 'Qt4Agg', # qt3 not supported
            'qt4': 'Qt4Agg',
            'qt5': 'Qt5Agg',
            'osx': 'MacOSX',
            'nbagg': 'nbAgg',
            'notebook': 'nbAgg',
            'agg': 'agg',
            'inline': 'module://ipykernel.pylab.backend_inline',
            'ipympl': 'module://ipympl.backend_nbagg',
            }
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# NOTE(review): `backends` has duplicate values, so which short name survives
# the zip depends on dict ordering; 'Qt4Agg' is pinned explicitly below.
backend2gui = dict(zip(backends.values(), backends.keys()))
# Tests expect backend2gui to return plain 'qt' for Qt4Agg.
backend2gui['Qt4Agg'] = 'qt'
# Extra valid matplotlib backends that map to the same GUI support.
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['GTK3Cairo'] = 'gtk3'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
# Backends that need no GUI integration are removed from the reverse map.
del backend2gui['nbAgg']
del backend2gui['agg']
del backend2gui['module://ipykernel.pylab.backend_inline']
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
    """Get a list of matplotlib figures by figure numbers.

    If no arguments are given, all available figures are returned.  Invalid
    figure numbers produce a warning, but the remaining figures are still
    looked up and returned.
    """
    from matplotlib._pylab_helpers import Gcf
    if not fig_nums:
        # No selection given: return every figure known to the manager.
        fig_managers = Gcf.get_all_fig_managers()
        return [fm.canvas.figure for fm in fig_managers]
    else:
        figs = []
        for num in fig_nums:
            f = Gcf.figs.get(num)
            if f is None:
                print('Warning: figure %s not available.' % num)
            else:
                figs.append(f.canvas.figure)
        return figs
def figsize(sizex, sizey):
    """Set matplotlib's default figure size to [sizex, sizey]."""
    import matplotlib
    matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
    """Render *fig* to image data in format *fmt* and return the file data.

    Returns unicode for ``fmt='svg'``, bytes otherwise, and ``None`` for an
    empty figure.  Extra keyword args are forwarded to
    ``fig.canvas.print_figure``.
    """
    # NOTE(review): rcParams is imported here but never used in this function.
    from matplotlib import rcParams
    # When there's an empty figure, we shouldn't return anything, otherwise we
    # get big blank areas in the qt console.
    if not fig.axes and not fig.lines:
        return
    dpi = fig.dpi
    if fmt == 'retina':
        # Retina: render at double resolution but emit a regular PNG.
        dpi = dpi * 2
        fmt = 'png'
    # build keyword args
    kw = dict(
        format=fmt,
        facecolor=fig.get_facecolor(),
        edgecolor=fig.get_edgecolor(),
        dpi=dpi,
        bbox_inches=bbox_inches,
    )
    # **kwargs get higher priority
    kw.update(kwargs)
    bytes_io = BytesIO()
    fig.canvas.print_figure(bytes_io, **kw)
    data = bytes_io.getvalue()
    if fmt == 'svg':
        # SVG is textual; decode so callers receive unicode.
        data = data.decode('utf-8')
    return data
def retina_figure(fig, **kwargs):
    """Format a figure as a pixel-doubled (retina) PNG."""
    pngdata = print_figure(fig, fmt='retina', **kwargs)
    # Make sure that retina_figure acts just like print_figure and returns
    # None when the figure is empty.
    if pngdata is None:
        return
    w, h = _pngxy(pngdata)
    # Report half the pixel dimensions so frontends render at 2x density.
    metadata = dict(width=w//2, height=h//2)
    return pngdata, metadata
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
    """Factory returning a matplotlib-enabled runner for %run.

    ``safe_execfile`` must have the same interface as IPython's
    ``safe_execfile`` method; the returned callable wraps it so that
    interactive rendering is deferred until the script finishes.
    """
    def mpl_execfile(fname,*where,**kw):
        """matplotlib-aware wrapper around safe_execfile."""
        import matplotlib
        import matplotlib.pyplot as plt
        #print '*** Matplotlib runner ***' # dbg
        # turn off rendering until end of script
        is_interactive = matplotlib.rcParams['interactive']
        matplotlib.interactive(False)
        safe_execfile(fname,*where,**kw)
        matplotlib.interactive(is_interactive)
        # make rendering call now, if the user tried to do it
        if plt.draw_if_interactive.called:
            plt.draw()
            plt.draw_if_interactive.called = False
        # re-draw everything that is stale
        try:
            da = plt.draw_all
        except AttributeError:
            # draw_all is absent in some matplotlib versions
            pass
        else:
            da()
    return mpl_execfile
def _reshow_nbagg_figure(fig):
    """Ask the nbagg backend to re-display *fig*."""
    try:
        reshow = fig.canvas.manager.reshow
    except AttributeError:
        # The active backend/manager has no reshow support.
        raise NotImplementedError()
    else:
        reshow()
def select_figure_formats(shell, formats, **kwargs):
    """Select figure formats for the inline backend.

    ``formats`` is one format name or a set of them ('png', 'retina',
    'png2x', 'jpg'/'jpeg', 'svg', 'pdf'); extra keyword args are passed to
    ``fig.canvas.print_figure``.
    """
    import matplotlib
    from matplotlib.figure import Figure
    svg_formatter = shell.display_formatter.formatters['image/svg+xml']
    png_formatter = shell.display_formatter.formatters['image/png']
    jpg_formatter = shell.display_formatter.formatters['image/jpeg']
    pdf_formatter = shell.display_formatter.formatters['application/pdf']
    # A bare string means a single format.
    if isinstance(formats, py3compat.string_types):
        formats = {formats}
    # cast in case of list / tuple
    formats = set(formats)
    # Drop any Figure formatter registered by a previous call.
    [ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
    mplbackend = matplotlib.get_backend().lower()
    if mplbackend == 'nbagg' or mplbackend == 'module://ipympl.backend_nbagg':
        # Live-widget backends re-show the existing figure widget instead of
        # printing a static image.
        formatter = shell.display_formatter.ipython_display_formatter
        formatter.for_type(Figure, _reshow_nbagg_figure)
    supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
    bad = formats.difference(supported)
    if bad:
        bs = "%s" % ','.join([repr(f) for f in bad])
        gs = "%s" % ','.join([repr(f) for f in supported])
        raise ValueError("supported formats are: %s not %s" % (gs, bs))
    if 'png' in formats:
        png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
    if 'retina' in formats or 'png2x' in formats:
        # Registered after 'png', so retina wins the image/png mimetype.
        png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
    if 'jpg' in formats or 'jpeg' in formats:
        jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
    if 'svg' in formats:
        svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
    if 'pdf' in formats:
        pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
    """Given a gui string, return the (gui, matplotlib backend) pair.

    ``gui`` is a short name such as 'tk', 'qt4' or 'inline' (or None/'auto'
    to use the user's matplotlib default); ``gui_select`` is any gui already
    selected by the shell, which constrains the result.
    """
    import matplotlib
    if gui and gui != 'auto':
        # select backend based on requested gui
        backend = backends[gui]
    else:
        # We need to read the backend from the original data structure, *not*
        # from mpl.rcParams, since a prior invocation of %matplotlib may have
        # overwritten that.
        # WARNING: this assumes matplotlib 1.1 or newer!!
        backend = matplotlib.rcParamsOrig['backend']
        # In this case, we need to find what the appropriate gui selection call
        # should be for IPython, so we can activate inputhook accordingly
        gui = backend2gui.get(backend, None)
        # If we have already had a gui active, we need it and inline are the
        # ones allowed.
        if gui_select and gui != gui_select:
            gui = gui_select
            backend = backends[gui]
    return gui, backend
def activate_matplotlib(backend):
    """Activate the given backend and set interactive mode to True."""
    import matplotlib
    matplotlib.interactive(True)
    # Matplotlib had a bug where even switch_backend could not force
    # the rcParam to update. This needs to be set *before* the module
    # magic of switch_backend().
    matplotlib.rcParams['backend'] = backend
    import matplotlib.pyplot
    matplotlib.pyplot.switch_backend(backend)
    # This must be imported last in the matplotlib series, after
    # backend/interactivity choices have been made
    import matplotlib.pyplot as plt
    # presumably tells show() not to start its own mainloop -- TODO confirm
    plt.show._needmain = False
    # We need to detect at runtime whether show() is called by the user.
    # For this, we wrap it into a decorator which adds a 'called' flag.
    plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
    """Populate *user_ns* with pylab-related values.

    Imports matplotlib, pylab and numpy (plus, when *import_all* is true,
    everything from pylab and numpy), and adds the IPython helpers
    figsize, display and getfigs.
    """
    # Import numpy as np/pyplot as plt are conventions we're trying to
    # standardize on; executing in user_ns makes them available by default.
    s = ("import numpy\n"
         "import matplotlib\n"
         "from matplotlib import pylab, mlab, pyplot\n"
         "np = numpy\n"
         "plt = pyplot\n"
         )
    exec(s, user_ns)
    if import_all:
        s = ("from matplotlib.pylab import *\n"
             "from numpy import *\n")
        exec(s, user_ns)
    # IPython-provided helpers.
    user_ns['figsize'] = figsize
    from IPython.core.display import display
    user_ns['display'] = display
    user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
    """Configure an IPython shell object for matplotlib use.

    *shell* is an InteractiveShell instance; *backend* a matplotlib
    backend name.
    """
    # If using our svg payload backend, register the post-execution
    # function that will pick up the results for display. This can only be
    # done with access to the real shell object.
    # Note: if we can't load the inline backend, then there's no point
    # continuing (such as in terminal-only shells in environments without
    # zeromq available).
    try:
        from ipykernel.pylab.backend_inline import InlineBackend
    except ImportError:
        return
    import matplotlib
    cfg = InlineBackend.instance(parent=shell)
    cfg.shell = shell
    if cfg not in shell.configurables:
        shell.configurables.append(cfg)
    if backend == backends['inline']:
        from ipykernel.pylab.backend_inline import flush_figures
        shell.events.register('post_execute', flush_figures)
        # Save rcParams that will be overwritten so they can be restored
        # when switching away from the inline backend.
        shell._saved_rcParams = dict()
        for k in cfg.rc:
            shell._saved_rcParams[k] = matplotlib.rcParams[k]
        # load inline_rc
        matplotlib.rcParams.update(cfg.rc)
        new_backend_name = "inline"
    else:
        from ipykernel.pylab.backend_inline import flush_figures
        try:
            shell.events.unregister('post_execute', flush_figures)
        except ValueError:
            # flush_figures was never registered (inline was not active).
            pass
        if hasattr(shell, '_saved_rcParams'):
            matplotlib.rcParams.update(shell._saved_rcParams)
            del shell._saved_rcParams
        new_backend_name = "other"
    # only enable the formats once -> don't change the enabled formats (which
    # the user may have changed) on a repeated "%matplotlib inline" call.
    cur_backend = getattr(configure_inline_support, "current_backend", "unset")
    if new_backend_name != cur_backend:
        # Setup the default figure formats for the new backend.
        select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
        configure_inline_support.current_backend = new_backend_name
| true | true |
f713ff1895f64aa70104aa664da6eedd61326435 | 35,539 | py | Python | sdk/batch/azure-mgmt-batch/azure/mgmt/batch/aio/operations/_pool_operations.py | eliagrady/azure-sdk-for-python | 64d0c78ce4a9b409dbc96864d7b65098891863f9 | [
"MIT"
] | null | null | null | sdk/batch/azure-mgmt-batch/azure/mgmt/batch/aio/operations/_pool_operations.py | eliagrady/azure-sdk-for-python | 64d0c78ce4a9b409dbc96864d7b65098891863f9 | [
"MIT"
] | null | null | null | sdk/batch/azure-mgmt-batch/azure/mgmt/batch/aio/operations/_pool_operations.py | eliagrady/azure-sdk-for-python | 64d0c78ce4a9b409dbc96864d7b65098891863f9 | [
"MIT"
] | 1 | 2021-05-19T02:55:10.000Z | 2021-05-19T02:55:10.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional `cls` response-transformer callback accepted by
# every operation in this module.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PoolOperations:
"""PoolOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.batch.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to issue HTTP requests.
        self._client = client
        # Request/response (de)serializers shared with the parent client.
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list_by_batch_account(
        self,
        resource_group_name: str,
        account_name: str,
        maxresults: Optional[int] = None,
        select: Optional[str] = None,
        filter: Optional[str] = None,
        **kwargs
    ) -> AsyncIterable["_models.ListPoolsResult"]:
        """Lists all of the pools in the specified account.

        :param resource_group_name: The name of the resource group that contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param maxresults: The maximum number of items to return in the response.
        :type maxresults: int
        :param select: Comma separated list of properties that should be returned. e.g.
         "properties/provisioningState". Only top level properties under properties/ are valid for
         selection.
        :type select: str
        :param filter: OData filter expression. Valid properties for filtering are:
         name
         properties/allocationState
         properties/allocationStateTransitionTime
         properties/creationTime
         properties/provisioningState
         properties/provisioningStateTransitionTime
         properties/lastModified
         properties/vmSize
         properties/interNodeCommunication
         properties/scaleSettings/autoScale
         properties/scaleSettings/fixedScale.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListPoolsResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.batch.models.ListPoolsResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListPoolsResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        # Build the GET request: the first page carries the full URL and query
        # parameters; follow-up pages use the opaque next_link as-is.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_batch_account.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if maxresults is not None:
                    query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserialize one page into (continuation link, items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('ListPoolsResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Fetch one page and validate the HTTP status.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_batch_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools'}  # type: ignore
    async def _create_initial(
        self,
        resource_group_name: str,
        account_name: str,
        pool_name: str,
        parameters: "_models.Pool",
        if_match: Optional[str] = None,
        if_none_match: Optional[str] = None,
        **kwargs
    ) -> "_models.Pool":
        """Issue the initial PUT that creates the pool (first leg of the LRO).

        Internal helper for :meth:`begin_create`; takes the same parameters
        and returns the deserialized ``Pool`` from the service's first
        response.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Pool"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers (If-Match / If-None-Match implement optimistic
        # concurrency on the pool's ETag).
        header_parameters = {}  # type: Dict[str, Any]
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if if_none_match is not None:
            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'Pool')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        response_headers = {}
        # Surface the pool's new ETag so callers can do conditional updates.
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('Pool', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'}  # type: ignore
    async def begin_create(
        self,
        resource_group_name: str,
        account_name: str,
        pool_name: str,
        parameters: "_models.Pool",
        if_match: Optional[str] = None,
        if_none_match: Optional[str] = None,
        **kwargs
    ) -> AsyncLROPoller["_models.Pool"]:
        """Creates a new pool inside the specified account.

        :param resource_group_name: The name of the resource group that contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param pool_name: The pool name. This must be unique within the account.
        :type pool_name: str
        :param parameters: Additional parameters for pool creation.
        :type parameters: ~azure.mgmt.batch.models.Pool
        :param if_match: The entity state (ETag) version of the pool to update. A value of "*" can be
         used to apply the operation only if the pool already exists. If omitted, this operation will
         always be applied.
        :type if_match: str
        :param if_none_match: Set to '*' to allow a new pool to be created, but to prevent updating an
         existing pool. Other values will be ignored.
        :type if_none_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Pool or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.batch.models.Pool]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Pool"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PUT.  `cls=lambda x,y,z: x`
            # keeps the raw pipeline response for the poller.
            raw_result = await self._create_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                parameters=parameters,
                if_match=if_match,
                if_none_match=if_none_match,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        # Deserialize the final response once polling completes.
        def get_long_running_output(pipeline_response):
            response_headers = {}
            response = pipeline_response.http_response
            response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
            deserialized = self._deserialize('Pool', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, response_headers)
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously-started operation from its saved token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'}  # type: ignore
async def update(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
parameters: "_models.Pool",
if_match: Optional[str] = None,
**kwargs
) -> "_models.Pool":
"""Updates the properties of an existing pool.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param pool_name: The pool name. This must be unique within the account.
:type pool_name: str
:param parameters: Pool properties that should be updated. Properties that are supplied will be
updated, any property not supplied will be unchanged.
:type parameters: ~azure.mgmt.batch.models.Pool
:param if_match: The entity state (ETag) version of the pool to update. This value can be
omitted or set to "*" to apply the operation unconditionally.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Pool, or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.Pool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Pool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'Pool')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Pool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        account_name: str,
        pool_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the specified pool.

        :param resource_group_name: The name of the resource group that contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param pool_name: The pool name. This must be unique within the account.
        :type pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                # Pass the raw pipeline response through untouched so the poller
                # can inspect the initial response itself.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Remove request-scoped kwargs consumed by the initial call so they are
        # not forwarded again to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deletion produces no body; only invoke a caller-supplied cls hook.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> default ARM long-running-operation polling;
        # polling=False -> return immediately; otherwise use the supplied strategy.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'}  # type: ignore
async def get(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
**kwargs
) -> "_models.Pool":
"""Gets information about the specified pool.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param pool_name: The pool name. This must be unique within the account.
:type pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Pool, or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.Pool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Pool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Pool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'} # type: ignore
async def disable_auto_scale(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
**kwargs
) -> "_models.Pool":
"""Disables automatic scaling for a pool.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param pool_name: The pool name. This must be unique within the account.
:type pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Pool, or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.Pool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Pool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.disable_auto_scale.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Pool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
disable_auto_scale.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}/disableAutoScale'} # type: ignore
async def stop_resize(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
**kwargs
) -> "_models.Pool":
"""Stops an ongoing resize operation on the pool.
This does not restore the pool to its previous state before the resize operation: it only stops
any further changes being made, and the pool maintains its current state. After stopping, the
pool stabilizes at the number of nodes it was at when the stop operation was done. During the
stop operation, the pool allocation state changes first to stopping and then to steady. A
resize operation need not be an explicit resize pool request; this API can also be used to halt
the initial sizing of the pool when it is created.
:param resource_group_name: The name of the resource group that contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param pool_name: The pool name. This must be unique within the account.
:type pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Pool, or the result of cls(response)
:rtype: ~azure.mgmt.batch.models.Pool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Pool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.stop_resize.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Pool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
stop_resize.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}/stopResize'} # type: ignore
| 50.842632 | 215 | 0.662624 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional ``cls`` response hook accepted by every operation:
# it receives the pipeline response, the deserialized body, and response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PoolOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list_by_batch_account(
        self,
        resource_group_name: str,
        account_name: str,
        maxresults: Optional[int] = None,
        select: Optional[str] = None,
        filter: Optional[str] = None,
        **kwargs
    ) -> AsyncIterable["_models.ListPoolsResult"]:
        """Lists all of the pools in the specified account, page by page.

        :param resource_group_name: The name of the resource group that contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param maxresults: The maximum number of items to return in a response page.
        :type maxresults: int
        :param select: Comma-separated list of properties to include in the response.
        :type select: str
        :param filter: OData filter expression restricting which pools are returned.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed each page's items.
        :return: An async iterator over ListPoolsResult pages.
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.batch.models.ListPoolsResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the URL template plus
            # query options; later pages use the service-provided next_link as-is.
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_by_batch_account.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                # Optional paging/projection/filter options are only sent when supplied.
                if maxresults is not None:
                    query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (next page link, items of this page).
            deserialized = self._deserialize('ListPoolsResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page, mapping error statuses to typed exceptions.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_batch_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools'}
async def _create_initial(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
parameters: "_models.Pool",
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
**kwargs
) -> "_models.Pool":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._create_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'Pool')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Pool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'}
    async def begin_create(
        self,
        resource_group_name: str,
        account_name: str,
        pool_name: str,
        parameters: "_models.Pool",
        if_match: Optional[str] = None,
        if_none_match: Optional[str] = None,
        **kwargs
    ) -> AsyncLROPoller["_models.Pool"]:
        """Creates a new pool inside the specified account (long-running operation).

        :param resource_group_name: The name of the resource group that contains the Batch account.
        :type resource_group_name: str
        :param account_name: The name of the Batch account.
        :type account_name: str
        :param pool_name: The pool name. This must be unique within the account.
        :type pool_name: str
        :param parameters: Additional parameters for pool creation.
        :type parameters: ~azure.mgmt.batch.models.Pool
        :param if_match: The entity state (ETag) version of the pool to update. A value of "*" can
         be used to apply the operation only if the pool already exists.
        :type if_match: str
        :param if_none_match: Set to '*' to allow a new pool to be created, but to prevent updating
         an existing pool. Other values will be ignored.
        :type if_none_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a polling object for a
         personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if
         no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Pool or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.batch.models.Pool]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only issue the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._create_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                parameters=parameters,
                if_match=if_match,
                if_none_match=if_none_match,
                # Pass the raw pipeline response through untouched so the poller
                # can inspect the initial response itself.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Remove request-scoped kwargs consumed by the initial call so they are
        # not forwarded again to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final Pool body and expose the ETag response header.
            response_headers = {}
            response = pipeline_response.http_response
            response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
            deserialized = self._deserialize('Pool', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, response_headers)
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> default ARM long-running-operation polling;
        # polling=False -> return immediately; otherwise use the supplied strategy.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'}
async def update(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
parameters: "_models.Pool",
if_match: Optional[str] = None,
**kwargs
) -> "_models.Pool":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self.update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'Pool')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Pool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'}
async def _delete_initial(
self,
resource_group_name: str,
account_name: str,
pool_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
url = self._delete_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'}
    async def begin_delete(
        self,
        resource_group_name: str,
        account_name: str,
        pool_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the specified pool as a long-running operation.

        :param resource_group_name: Name of the resource group that contains the Batch account.
        :param account_name: Name of the Batch account.
        :param pool_name: Name of the pool.
        :keyword polling: True (default ARM polling), False (no polling) or a
            custom polling method.
        :keyword continuation_token: token to resume a previously started poll.
        :return: AsyncLROPoller that completes when the deletion finishes.
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # Start the operation; keep the raw pipeline response so the
            # poller can read the LRO headers from it.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete returns no body; only apply the user's transform, if any.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resuming an earlier operation; raw_result was never computed and
            # is not needed on this path.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'}
    async def get(
        self,
        resource_group_name: str,
        account_name: str,
        pool_name: str,
        **kwargs
    ) -> "_models.Pool":
        """Gets information about the specified pool.

        :param resource_group_name: Name of the resource group that contains the Batch account.
        :param account_name: Name of the Batch account.
        :param pool_name: Name of the pool.
        :return: The deserialized Pool (or the result of ``cls`` if supplied).
        :raises HttpResponseError: if the service returns a non-200 status.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        # Construct the URL from the metadata template attached below.
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        response_headers = {}
        # The pool's ETag is surfaced alongside the deserialized body.
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('Pool', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'}
    async def disable_auto_scale(
        self,
        resource_group_name: str,
        account_name: str,
        pool_name: str,
        **kwargs
    ) -> "_models.Pool":
        """Disables automatic scaling for the specified pool (POST /disableAutoScale).

        :param resource_group_name: Name of the resource group that contains the Batch account.
        :param account_name: Name of the Batch account.
        :param pool_name: Name of the pool.
        :return: The updated Pool (or the result of ``cls`` if supplied).
        :raises HttpResponseError: if the service returns a non-200 status.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        # Construct the URL from the metadata template attached below.
        url = self.disable_auto_scale.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Action endpoint: a body-less POST.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('Pool', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    disable_auto_scale.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}/disableAutoScale'}
    async def stop_resize(
        self,
        resource_group_name: str,
        account_name: str,
        pool_name: str,
        **kwargs
    ) -> "_models.Pool":
        """Stops an in-progress resize operation on the pool (POST /stopResize).

        :param resource_group_name: Name of the resource group that contains the Batch account.
        :param account_name: Name of the Batch account.
        :param pool_name: Name of the pool.
        :return: The updated Pool (or the result of ``cls`` if supplied).
        :raises HttpResponseError: if the service returns a non-200 status.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        # Construct the URL from the metadata template attached below.
        url = self.stop_resize.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[a-zA-Z0-9]+$'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9_-]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Action endpoint: a body-less POST.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('Pool', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    stop_resize.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}/stopResize'}
| true | true |
f713ff85630f45ddfb90ff880de1b07a38106d1e | 1,867 | py | Python | plaso/cli/helpers/vfs_backend.py | rgayon/plaso | 5f1d0f2da19a28a00ab62c276162483e79a42efb | [
"Apache-2.0"
] | 1 | 2020-12-04T10:26:34.000Z | 2020-12-04T10:26:34.000Z | plaso/cli/helpers/vfs_backend.py | dvntaudio/plaso | 6debdabbce3619b3210efa2a2cbc91242c02d4e3 | [
"Apache-2.0"
] | null | null | null | plaso/cli/helpers/vfs_backend.py | dvntaudio/plaso | 6debdabbce3619b3210efa2a2cbc91242c02d4e3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""The VFS back-end CLI arguments helper."""
from __future__ import unicode_literals
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.lib import errors
class VFSBackEndArgumentsHelper(interface.ArgumentsHelper):
  """CLI arguments helper that selects the preferred dfVFS back-end."""

  NAME = 'vfs_backend'
  DESCRIPTION = 'dfVFS back-end command line arguments.'

  @classmethod
  def AddArguments(cls, argument_group):
    """Adds command line arguments to an argument group.

    This function takes an argument parser or an argument group object and adds
    to it all the command line arguments this helper supports.

    Args:
      argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
          argparse group.
    """
    back_end_choices = ['auto', 'fsext', 'fshfs', 'fsntfs', 'tsk']
    help_text = (
        'The preferred dfVFS back-end: "auto", "fsext", "fshfs", "fsntfs" '
        'or "tsk".')

    argument_group.add_argument(
        '--vfs_back_end', '--vfs-back-end', dest='vfs_back_end',
        action='store', choices=back_end_choices, default='auto',
        metavar='TYPE', help=help_text)

  @classmethod
  def ParseOptions(cls, options, configuration_object):
    """Parses and validates options.

    Args:
      options (argparse.Namespace): parser options.
      configuration_object (CLITool): object to be configured by the argument
          helper.

    Raises:
      BadConfigObject: when the configuration object is of the wrong type.
    """
    if not isinstance(configuration_object, tools.CLITool):
      raise errors.BadConfigObject(
          'Configuration object is not an instance of CLITool')

    setattr(
        configuration_object, '_vfs_back_end',
        cls._ParseStringOption(options, 'vfs_back_end'))
# Register this helper with the global argument-helper manager at import time.
manager.ArgumentHelperManager.RegisterHelper(VFSBackEndArgumentsHelper)
| 32.189655 | 79 | 0.705945 |
from __future__ import unicode_literals
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.lib import errors
class VFSBackEndArgumentsHelper(interface.ArgumentsHelper):
NAME = 'vfs_backend'
DESCRIPTION = 'dfVFS back-end command line arguments.'
@classmethod
def AddArguments(cls, argument_group):
argument_group.add_argument(
'--vfs_back_end', '--vfs-back-end', dest='vfs_back_end',
choices=['auto', 'fsext', 'fshfs', 'fsntfs', 'tsk'], action='store',
metavar='TYPE', default='auto', help=(
'The preferred dfVFS back-end: "auto", "fsext", "fshfs", "fsntfs" '
'or "tsk".'))
@classmethod
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
vfs_back_end = cls._ParseStringOption(options, 'vfs_back_end')
setattr(configuration_object, '_vfs_back_end', vfs_back_end)
manager.ArgumentHelperManager.RegisterHelper(VFSBackEndArgumentsHelper)
| true | true |
f71401cefe5604f63cb273a6cb7d6715836fee70 | 2,146 | py | Python | fab_deploy/contrib/servers.py | samdolan/django-fab-deploy | 642e5cc319f811d4ee647d65388c85988ac887e2 | [
"Unlicense"
] | 1 | 2019-08-04T20:54:43.000Z | 2019-08-04T20:54:43.000Z | fab_deploy/contrib/servers.py | samdolan/django-fab-deploy | 642e5cc319f811d4ee647d65388c85988ac887e2 | [
"Unlicense"
] | null | null | null | fab_deploy/contrib/servers.py | samdolan/django-fab-deploy | 642e5cc319f811d4ee647d65388c85988ac887e2 | [
"Unlicense"
] | null | null | null | from .constants import ALL_ROLES, DB_ROLE, WEB_ROLE
from .database import setup_db
from .django import update_db, update_python_libs
from .nginx import stop_nginx, start_nginx
from .ssh import setup_ssh_key
from .supervisor import stop_supervisor, start_supervisor, update_supervisor
from .utils import get_ip
from .webserver import setup_web
from fabric.colors import green
from fabric.api import *
from .git import get_source
from .nginx import update_nginx
import cuisine
# Apt packages installed on every server role by setup_common().
COMMON_PACKAGES = [
    'subversion', 'mercurial', 'git-core', 'vim', 'python-dev', 'ufw',
    'python-setuptools', 'htop', 'ntp', 'colordiff', 'python-software-properties',
    'psmisc',
    'libpq-dev',  # PostgreSQL client library headers
]
@task
@roles(DB_ROLE)
@runs_once
def set_database_ip(interface='eth1'):
    """Store the database host's IP (read from *interface*) in ``env.db_ip``."""
    env.db_ip = get_ip(interface)
@task
@roles(WEB_ROLE)
@runs_once
def set_web_server_ips(interface='eth1'):
    """Store the web servers' internal IPs in ``env.webserver_internal_ips``."""
    env.webserver_internal_ips = [get_ip(interface),]
@task
def set_port(port):
    """Set the port to use for ssh connections."""
    env.port = port
@task
@roles(ALL_ROLES)
def setup_common():
    """Install baseline packages and lock down the firewall on every host."""
    print(green("Running setup_common.........."))
    execute(setup_ssh_key)
    cuisine.package_install(COMMON_PACKAGES, True)
    # Enable ufw non-interactively; allow only the configured connection port
    # (env.port), rate-limit ssh, and deny everything else by default.
    sudo('yes | ufw enable')
    sudo('ufw logging on')
    sudo('ufw allow %(port)s' % env)
    sudo('ufw limit ssh')
    sudo('ufw default deny')
@task
@roles(WEB_ROLE)
def setup_run_dirs():
    """Create the log and socket directories and hand them to the deploy user."""
    for d in (env.log_location, env.socket_location):
        # mkdir fails if the directory already exists; warn instead of aborting.
        with settings(warn_only=True):
            sudo('mkdir %s' % d)
        sudo('chown -R %s: %s' % (env.deploy_user, d))
@task
def setup():
    """Provision the database and web servers, then run a full deploy."""
    execute(setup_db)
    execute(setup_web)
    execute(update)
@task
def update():
    """Update the servers w/the latest source code + migrations."""
    # Stop serving, deploy new code and config, then bring services back up
    # (note: this is a with-downtime deployment).
    execute(stop_supervisor)
    execute(stop_nginx)
    execute(get_source)
    execute(update_python_libs)
    execute(update_db)
    execute(update_supervisor)
    execute(update_nginx)
    execute(start_supervisor)
    execute(start_nginx)
| 23.582418 | 82 | 0.695247 | from .constants import ALL_ROLES, DB_ROLE, WEB_ROLE
from .database import setup_db
from .django import update_db, update_python_libs
from .nginx import stop_nginx, start_nginx
from .ssh import setup_ssh_key
from .supervisor import stop_supervisor, start_supervisor, update_supervisor
from .utils import get_ip
from .webserver import setup_web
from fabric.colors import green
from fabric.api import *
from .git import get_source
from .nginx import update_nginx
import cuisine
COMMON_PACKAGES = [
'subversion', 'mercurial', 'git-core', 'vim', 'python-dev', 'ufw',
'python-setuptools', 'htop', 'ntp', 'colordiff', 'python-software-properties',
'psmisc',
'libpq-dev',
]
@task
@roles(DB_ROLE)
@runs_once
def set_database_ip(interface='eth1'):
env.db_ip = get_ip(interface)
@task
@roles(WEB_ROLE)
@runs_once
def set_web_server_ips(interface='eth1'):
env.webserver_internal_ips = [get_ip(interface),]
@task
def set_port(port):
env.port = port
@task
@roles(ALL_ROLES)
def setup_common():
print(green("Running setup_common.........."))
execute(setup_ssh_key)
cuisine.package_install(COMMON_PACKAGES, True)
sudo('yes | ufw enable')
sudo('ufw logging on')
sudo('ufw allow %(port)s' % env)
sudo('ufw limit ssh')
sudo('ufw default deny')
@task
@roles(WEB_ROLE)
def setup_run_dirs():
for d in (env.log_location, env.socket_location):
with settings(warn_only=True):
sudo('mkdir %s' % d)
sudo('chown -R %s: %s' % (env.deploy_user, d))
@task
def setup():
execute(setup_db)
execute(setup_web)
execute(update)
@task
def update():
execute(stop_supervisor)
execute(stop_nginx)
execute(get_source)
execute(update_python_libs)
execute(update_db)
execute(update_supervisor)
execute(update_nginx)
execute(start_supervisor)
execute(start_nginx)
| true | true |
f71404e9bb036de5356fa647bf6aa5feffbe1d2a | 1,654 | py | Python | cog/help.py | tasuren/TunaBot | dd9ecf79280c388bd3b38cb1822b5dbc3666a86c | [
"MIT"
] | 1 | 2021-05-22T07:30:40.000Z | 2021-05-22T07:30:40.000Z | cog/help.py | tasuren/TunaBot | dd9ecf79280c388bd3b38cb1822b5dbc3666a86c | [
"MIT"
] | 1 | 2021-12-14T12:24:30.000Z | 2021-12-14T12:24:30.000Z | cog/help.py | tasuren/TunaBot | dd9ecf79280c388bd3b38cb1822b5dbc3666a86c | [
"MIT"
] | 2 | 2020-10-25T01:29:16.000Z | 2022-02-26T04:49:20.000Z | # TunaBot Ext - Help
from discord.ext import commands
import discord
from aiofiles import open as async_open
from ujson import load, loads
from data import is_admin
JSON_PATH = "data/help.json"
class Help(commands.Cog):
def __init__(self, bot):
self.bot, self.tuna = bot, bot.data
with open(JSON_PATH, 'r') as f:
self.data = load(f)
@commands.command(aliases=["rh"])
@is_admin
async def reloadhelp(self, ctx):
async with async_open(JSON_PATH, "r") as f:
self.data = loads(f.read())
await ctx.reply("Ok")
@commands.command()
async def help(self, ctx, *, cmd=None):
title, description = None, None
if cmd:
keys = []
for category in self.data:
if cmd == category:
keys.append(category)
break
for c in self.data[category]:
if c == cmd:
keys.append(category)
keys.append(c)
break
if len(keys) == 2:
title = f"{cmd}のHELP"
description = self.data[keys[0]][keys[1]]
elif len(keys) == 1:
title = f"{cmd}のHELP"
description = "\n".join(f"`{key}`" for key in self.data[category])
else:
title, description = "HELP", "見つかりませんでした。"
else:
title, description = "HELP", "\n".join(f"`{key}`" for key in self.data)
await ctx.reply(embed=discord.Embed(title=title, description=description))
def setup(bot):
bot.add_cog(Help(bot))
| 29.017544 | 83 | 0.52237 |
from discord.ext import commands
import discord
from aiofiles import open as async_open
from ujson import load, loads
from data import is_admin
JSON_PATH = "data/help.json"
class Help(commands.Cog):
def __init__(self, bot):
self.bot, self.tuna = bot, bot.data
with open(JSON_PATH, 'r') as f:
self.data = load(f)
@commands.command(aliases=["rh"])
@is_admin
async def reloadhelp(self, ctx):
async with async_open(JSON_PATH, "r") as f:
self.data = loads(f.read())
await ctx.reply("Ok")
@commands.command()
async def help(self, ctx, *, cmd=None):
title, description = None, None
if cmd:
keys = []
for category in self.data:
if cmd == category:
keys.append(category)
break
for c in self.data[category]:
if c == cmd:
keys.append(category)
keys.append(c)
break
if len(keys) == 2:
title = f"{cmd}のHELP"
description = self.data[keys[0]][keys[1]]
elif len(keys) == 1:
title = f"{cmd}のHELP"
description = "\n".join(f"`{key}`" for key in self.data[category])
else:
title, description = "HELP", "見つかりませんでした。"
else:
title, description = "HELP", "\n".join(f"`{key}`" for key in self.data)
await ctx.reply(embed=discord.Embed(title=title, description=description))
def setup(bot):
bot.add_cog(Help(bot))
| true | true |
f71405186d46fa9050d4b62064a287cfd9e3f822 | 485 | py | Python | main.py | FayasNoushad/Requote-URL-Bot | 24f35ba56981d455d7e80a7a3dfb60b1cacb3caa | [
"MIT"
] | 5 | 2021-09-04T06:15:05.000Z | 2021-11-28T12:00:35.000Z | main.py | sreeragbot/Requote-URL-Bot | 24f35ba56981d455d7e80a7a3dfb60b1cacb3caa | [
"MIT"
] | null | null | null | main.py | sreeragbot/Requote-URL-Bot | 24f35ba56981d455d7e80a7a3dfb60b1cacb3caa | [
"MIT"
] | 5 | 2021-09-04T06:45:24.000Z | 2022-02-06T18:05:24.000Z | import os
from requests.utils import requote_uri
from pyrogram import Client, filters
Bot = Client(
"Requote-URL-Bot",
bot_token = os.environ["BOT_TOKEN"],
api_id = int(os.environ["API_ID"]),
api_hash = os.environ["API_HASH"]
)
@Bot.on_message(filters.text)
async def filter(bot, update):
await update.reply_text(
text=f"`{requote_uri(update.text)}`\n\nMade by @FayasNoushad",
disable_web_page_preview=True,
quote=True
)
Bot.run()
| 20.208333 | 70 | 0.676289 | import os
from requests.utils import requote_uri
from pyrogram import Client, filters
Bot = Client(
"Requote-URL-Bot",
bot_token = os.environ["BOT_TOKEN"],
api_id = int(os.environ["API_ID"]),
api_hash = os.environ["API_HASH"]
)
@Bot.on_message(filters.text)
async def filter(bot, update):
await update.reply_text(
text=f"`{requote_uri(update.text)}`\n\nMade by @FayasNoushad",
disable_web_page_preview=True,
quote=True
)
Bot.run()
| true | true |
f714056b038088a8e129999ae37a63685ffc8967 | 1,296 | py | Python | settings.py | UUDigitalHumanitieslab/historic-hebrew-dates | 5ace44d9b1315a96a96ea296383f0b618d994212 | [
"BSD-3-Clause"
] | 1 | 2020-06-24T10:29:17.000Z | 2020-06-24T10:29:17.000Z | settings.py | UUDigitalHumanitieslab/historic-hebrew-dates | 5ace44d9b1315a96a96ea296383f0b618d994212 | [
"BSD-3-Clause"
] | 11 | 2019-10-10T08:50:01.000Z | 2022-03-02T05:23:55.000Z | settings.py | UUDigitalHumanitieslab/historic-hebrew-dates | 5ace44d9b1315a96a96ea296383f0b618d994212 | [
"BSD-3-Clause"
] | null | null | null | """ This is magic glue for integrating the frontend and backend.
This is NOT the place for backend customizations. Go to
api/historic_hebrew_dates_ui/settings.py instead.
"""
import os.path as op
here = op.dirname(op.abspath(__file__))
# First, import the standard backend settings. This requires some
# magic because the backend directory itself is not a Python package.
# Imitated from https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
# or
# https://stackoverflow.com/a/29855240
# (respectively for Python >= 3.5 and Python 3.4)
import sys
from importlib import util, machinery
settings_name = 'settings'
settings_path = op.join(here, 'api', 'historic_hebrew_dates_ui', 'settings.py')
if sys.version_info >= (3, 5):
spec = util.spec_from_file_location(settings_name, settings_path)
settings = util.module_from_spec(spec)
spec.loader.exec_module(settings)
else:
settings = machinery.SourceFileLoader(settings_name, settings_path).load_module()
sys.modules[settings_name] = settings
from settings import *
# Next, augment the settings to make the backend aware of the frontend.
STATICFILES_DIRS += [
op.join(here, 'web-ui', 'dist'),
op.join(here, 'web-ui', 'node_modules'),
]
PROXY_FRONTEND = "http://localhost:4200"
| 28.8 | 97 | 0.748457 |
import os.path as op
here = op.dirname(op.abspath(__file__))
port util, machinery
settings_name = 'settings'
settings_path = op.join(here, 'api', 'historic_hebrew_dates_ui', 'settings.py')
if sys.version_info >= (3, 5):
spec = util.spec_from_file_location(settings_name, settings_path)
settings = util.module_from_spec(spec)
spec.loader.exec_module(settings)
else:
settings = machinery.SourceFileLoader(settings_name, settings_path).load_module()
sys.modules[settings_name] = settings
from settings import *
STATICFILES_DIRS += [
op.join(here, 'web-ui', 'dist'),
op.join(here, 'web-ui', 'node_modules'),
]
PROXY_FRONTEND = "http://localhost:4200"
| true | true |
f714058cf6804da67a6dd34483c0d76cf1bd23ea | 3,828 | py | Python | delfin/tests/unit/drivers/dell_emc/vmax/test_alert_handler.py | joseph-v/SIM | 61fedb261aa745d715b8a30c0945a6244fb807e2 | [
"Apache-2.0"
] | 4 | 2020-04-10T03:48:55.000Z | 2020-04-27T07:52:55.000Z | delfin/tests/unit/drivers/dell_emc/vmax/test_alert_handler.py | joseph-v/SIM | 61fedb261aa745d715b8a30c0945a6244fb807e2 | [
"Apache-2.0"
] | 210 | 2020-05-08T04:06:49.000Z | 2020-06-22T12:59:02.000Z | delfin/tests/unit/drivers/dell_emc/vmax/test_alert_handler.py | joseph-v/SIM | 61fedb261aa745d715b8a30c0945a6244fb807e2 | [
"Apache-2.0"
] | 10 | 2020-04-11T07:09:55.000Z | 2020-04-28T09:50:13.000Z | # Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from oslo_utils import importutils
from delfin import exception
from delfin.common import constants
class AlertHandlerTestCase(unittest.TestCase):
    """Unit tests for the VMAX SNMP alert handler's trap parsing."""
    # Dotted path of the class under test; imported lazily via importutils.
    ALERT_HANDLER_CLASS = 'delfin.drivers.dell_emc.vmax.alert_handler' \
                          '.snmp_alerts.AlertHandler'
    def _get_alert_handler(self):
        """Instantiate the AlertHandler class under test."""
        alert_handler_class = importutils.import_class(
            self.ALERT_HANDLER_CLASS)
        alert_handler = alert_handler_class()
        return alert_handler
    def _get_fake_alert_info(self):
        """Build a fake SNMP trap payload keyed by OID strings.

        NOTE(review): the keys look like connectivity-unit event MIB OIDs plus
        EMC-specific extensions — confirm against the handler's OID constants
        before relying on individual entries.
        """
        alert_info = {
            '1.3.6.1.3.94.1.11.1.3.0': 79,
            '1.3.6.1.3.94.1.6.1.20.0': '000192601409',
            '1.3.6.1.3.94.1.11.1.7.0': 'topology',
            '1.3.6.1.3.94.1.11.1.9.0': 'Symmetrix 000192601409 FastSRP '
                                       'SRP_1 : Remote (SRDF) diagnostic '
                                       'event trace triggered.',
            '1.3.6.1.3.94.1.11.1.6.0': '6',
            '1.3.6.1.3.94.1.6.1.3.0': 'storage-subsystem',
            '1.3.6.1.4.1.1139.3.8888.1.0.0': 'symmetrix',
            '1.3.6.1.4.1.1139.3.8888.2.0.0': '1050',
            '1.3.6.1.4.1.1139.3.8888.3.0.0': '1051',
            '1.3.6.1.4.1.1139.3.8888.4.0.0': 'SRP_1'}
        return alert_info
    def test_parse_alert_with_all_necessary_info(self):
        """ Success flow with all necessary parameters"""
        alert_handler_inst = self._get_alert_handler()
        alert = self._get_fake_alert_info()
        # Expected model derived field-by-field from the trap payload above.
        expected_alert_model = {
            'alert_id': alert['1.3.6.1.4.1.1139.3.8888.2.0.0'],
            'alert_name': 'SYMAPI_AEVENT2_UID_MOD_DIAG_TRACE_TRIG',
            'severity': constants.Severity.WARNING,
            'category': constants.Category.NOT_SPECIFIED,
            'type': constants.EventType.EQUIPMENT_ALARM,
            'sequence_number': alert['1.3.6.1.3.94.1.11.1.3.0'],
            'serial_number': '000192601409',
            'description': alert['1.3.6.1.3.94.1.11.1.9.0'],
            'recovery_advice': 'None',
            'resource_type': alert['1.3.6.1.3.94.1.6.1.3.0'],
            'location': 'Array id=000192601409,'
                        'Component type=Symmetrix Disk '
                        'Group,'
                        'Component name=SRP_1,'
                        'Event source=symmetrix',
        }
        context = {}
        alert_model = alert_handler_inst.parse_alert(context, alert)
        # occur_time depends on current time
        # Verify that all other fields are matching
        expected_alert_model['occur_time'] = alert_model['occur_time']
        self.assertDictEqual(expected_alert_model, alert_model)
    def test_parse_alert_without_mandatory_info(self):
        """ Error flow with some mandatory parameters missing"""
        alert_handler_inst = self._get_alert_handler()
        context = {}
        alert = self._get_fake_alert_info()
        # Blank out the severity OID, which the handler treats as mandatory.
        alert['1.3.6.1.3.94.1.11.1.6.0'] = ''
        self.assertRaisesRegex(exception.InvalidInput, "Mandatory information "
                                                       "connUnitEventSeverity"
                                                       " missing",
                               alert_handler_inst.parse_alert, context, alert)
| 42.533333 | 79 | 0.59326 |
tils import importutils
from delfin import exception
from delfin.common import constants
class AlertHandlerTestCase(unittest.TestCase):
ALERT_HANDLER_CLASS = 'delfin.drivers.dell_emc.vmax.alert_handler' \
'.snmp_alerts.AlertHandler'
def _get_alert_handler(self):
alert_handler_class = importutils.import_class(
self.ALERT_HANDLER_CLASS)
alert_handler = alert_handler_class()
return alert_handler
def _get_fake_alert_info(self):
alert_info = {
'1.3.6.1.3.94.1.11.1.3.0': 79,
'1.3.6.1.3.94.1.6.1.20.0': '000192601409',
'1.3.6.1.3.94.1.11.1.7.0': 'topology',
'1.3.6.1.3.94.1.11.1.9.0': 'Symmetrix 000192601409 FastSRP '
'SRP_1 : Remote (SRDF) diagnostic '
'event trace triggered.',
'1.3.6.1.3.94.1.11.1.6.0': '6',
'1.3.6.1.3.94.1.6.1.3.0': 'storage-subsystem',
'1.3.6.1.4.1.1139.3.8888.1.0.0': 'symmetrix',
'1.3.6.1.4.1.1139.3.8888.2.0.0': '1050',
'1.3.6.1.4.1.1139.3.8888.3.0.0': '1051',
'1.3.6.1.4.1.1139.3.8888.4.0.0': 'SRP_1'}
return alert_info
def test_parse_alert_with_all_necessary_info(self):
alert_handler_inst = self._get_alert_handler()
alert = self._get_fake_alert_info()
expected_alert_model = {
'alert_id': alert['1.3.6.1.4.1.1139.3.8888.2.0.0'],
'alert_name': 'SYMAPI_AEVENT2_UID_MOD_DIAG_TRACE_TRIG',
'severity': constants.Severity.WARNING,
'category': constants.Category.NOT_SPECIFIED,
'type': constants.EventType.EQUIPMENT_ALARM,
'sequence_number': alert['1.3.6.1.3.94.1.11.1.3.0'],
'serial_number': '000192601409',
'description': alert['1.3.6.1.3.94.1.11.1.9.0'],
'recovery_advice': 'None',
'resource_type': alert['1.3.6.1.3.94.1.6.1.3.0'],
'location': 'Array id=000192601409,'
'Component type=Symmetrix Disk '
'Group,'
'Component name=SRP_1,'
'Event source=symmetrix',
}
context = {}
alert_model = alert_handler_inst.parse_alert(context, alert)
expected_alert_model['occur_time'] = alert_model['occur_time']
self.assertDictEqual(expected_alert_model, alert_model)
def test_parse_alert_without_mandatory_info(self):
alert_handler_inst = self._get_alert_handler()
context = {}
alert = self._get_fake_alert_info()
alert['1.3.6.1.3.94.1.11.1.6.0'] = ''
self.assertRaisesRegex(exception.InvalidInput, "Mandatory information "
"connUnitEventSeverity"
" missing",
alert_handler_inst.parse_alert, context, alert)
| true | true |
f71409450650072e4b031dd2e4e4906f4cb8d026 | 12,788 | py | Python | recipes/hdf5/all/conanfile.py | dyndrite/conan-center-index | 106b5c2f532d5129e7ca1997e29e4e105bb3018c | [
"MIT"
] | 1 | 2021-11-11T03:07:13.000Z | 2021-11-11T03:07:13.000Z | recipes/hdf5/all/conanfile.py | dyndrite/conan-center-index | 106b5c2f532d5129e7ca1997e29e4e105bb3018c | [
"MIT"
] | null | null | null | recipes/hdf5/all/conanfile.py | dyndrite/conan-center-index | 106b5c2f532d5129e7ca1997e29e4e105bb3018c | [
"MIT"
] | null | null | null | from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import functools
import os
import textwrap
required_conan_version = ">=1.43.0"
class Hdf5Conan(ConanFile):
name = "hdf5"
description = "HDF5 is a data model, library, and file format for storing and managing data."
license = "BSD-3-Clause"
topics = ("hdf5", "hdf", "data")
homepage = "https://portal.hdfgroup.org/display/HDF5/HDF5"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"enable_cxx": [True, False],
"hl": [True, False],
"threadsafe": [True, False],
"with_zlib": [True, False],
"szip_support": [None, "with_libaec", "with_szip"],
"szip_encoding": [True, False],
"parallel": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"enable_cxx": True,
"hl": True,
"threadsafe": False,
"with_zlib": True,
"szip_support": None,
"szip_encoding": False,
"parallel": False,
}
generators = "cmake"
    @property
    def _source_subfolder(self):
        """Directory (relative to the recipe folder) holding the unpacked sources."""
        return "source_subfolder"
    @property
    def _build_subfolder(self):
        """Directory used for the out-of-source build."""
        return "build_subfolder"
    def export_sources(self):
        """Ship the wrapper CMakeLists and this version's patches with the recipe."""
        self.copy("CMakeLists.txt")
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            self.copy(patch["patch_file"])
    def config_options(self):
        """Drop options that do not apply to the target OS."""
        if self.settings.os == "Windows":
            # fPIC is meaningless on Windows.
            del self.options.fPIC
    def configure(self):
        """Prune options/settings that are irrelevant for the chosen configuration."""
        if self.options.shared:
            # fPIC is implied for shared libraries.
            del self.options.fPIC
        if not self.options.enable_cxx:
            # Pure C build: C++ standard/runtime settings do not affect the package.
            del self.settings.compiler.libcxx
            del self.settings.compiler.cppstd
        # Thread-safety is not offered together with the C++ or high-level APIs,
        # nor with static libraries on Windows, so the option is removed there.
        if self.options.enable_cxx or self.options.hl or (self.settings.os == "Windows" and not self.options.shared):
            del self.options.threadsafe
        if not bool(self.options.szip_support):
            # szip_encoding only makes sense when some szip provider is selected.
            del self.options.szip_encoding
    def requirements(self):
        """Declare the dependencies implied by the enabled options."""
        if self.options.with_zlib:
            self.requires("zlib/1.2.12")
        # At most one szip provider can be selected via szip_support.
        if self.options.szip_support == "with_libaec":
            self.requires("libaec/1.0.6")
        elif self.options.szip_support == "with_szip":
            self.requires("szip/2.1.1")
        if self.options.parallel:
            self.requires("openmpi/4.1.0")
    def validate(self):
        """Reject configurations the recipe cannot build."""
        if hasattr(self, "settings_build") and tools.cross_building(self, skip_x64_x86=True):
            # While building it runs some executables like H5detect
            raise ConanInvalidConfiguration("Current recipe doesn't support cross-building (yet)")
        if self.options.parallel:
            # MPI build excludes the C++ bindings and thread-safety.
            if self.options.enable_cxx:
                raise ConanInvalidConfiguration("Parallel and C++ options are mutually exclusive")
            if self.options.get_safe("threadsafe", False):
                raise ConanInvalidConfiguration("Parallel and Threadsafe options are mutually exclusive")
        # szip encoding requires the szip dependency to have been built with it.
        if self.options.szip_support == "with_szip" and self.options.szip_encoding and \
                not self.options["szip"].enable_encoding:
            raise ConanInvalidConfiguration("encoding must be enabled in szip dependency (szip:enable_encoding=True)")
    def source(self):
        """Download and unpack the upstream sources into the source subfolder."""
        tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
    def build(self):
        """Apply patches, configure CMake and build."""
        self._patch_sources()
        cmake = self._configure_cmake()
        cmake.build()
    def _patch_sources(self):
        """Apply exported patches and relax upstream's forced PIC setting."""
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        # Do not force PIC; let Conan's fPIC option govern it instead.
        tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
                              "set (CMAKE_POSITION_INDEPENDENT_CODE ON)", "")
    @functools.lru_cache(1)
    def _configure_cmake(self):
        """Create and configure the CMake helper (cached).

        The lru_cache ensures build() and package() share one configured
        instance instead of running the configure step twice.  Several
        definitions are gated on the HDF5 version because they only
        exist in newer CMakeLists.
        """
        cmake = CMake(self)
        cmake.definitions["HDF5_EXTERNALLY_CONFIGURED"] = True
        cmake.definitions["HDF5_EXTERNAL_LIB_PREFIX"] = ""
        cmake.definitions["HDF5_USE_FOLDERS"] = False
        cmake.definitions["HDF5_NO_PACKAGES"] = True
        cmake.definitions["ALLOW_UNSUPPORTED"] = False
        if tools.Version(self.version) >= "1.10.6":
            cmake.definitions["ONLY_SHARED_LIBS"] = self.options.shared
        cmake.definitions["BUILD_STATIC_EXECS"] = False
        cmake.definitions["HDF5_ENABLE_COVERAGE"] = False
        cmake.definitions["HDF5_ENABLE_USING_MEMCHECKER"] = False
        if tools.Version(self.version) >= "1.10.0":
            cmake.definitions["HDF5_MEMORY_ALLOC_SANITY_CHECK"] = False
        if tools.Version(self.version) >= "1.10.5":
            cmake.definitions["HDF5_ENABLE_PREADWRITE"] = True
        cmake.definitions["HDF5_ENABLE_DEPRECATED_SYMBOLS"] = True
        cmake.definitions["HDF5_BUILD_GENERATORS"] = False
        cmake.definitions["HDF5_ENABLE_TRACE"] = False
        if self.settings.build_type == "Debug":
            cmake.definitions["HDF5_ENABLE_INSTRUMENT"] = False  # Option?
        cmake.definitions["HDF5_ENABLE_PARALLEL"] = self.options.parallel
        cmake.definitions["HDF5_ENABLE_Z_LIB_SUPPORT"] = self.options.with_zlib
        cmake.definitions["HDF5_ENABLE_SZIP_SUPPORT"] = bool(self.options.szip_support)
        if bool(self.options.szip_support):
            cmake.definitions["CONAN_SZIP_LIBNAME"] = self._get_szip_lib()  # this variable is added by conanize-link-szip*.patch
        cmake.definitions["HDF5_ENABLE_SZIP_ENCODING"] = self.options.get_safe("szip_encoding", False)
        cmake.definitions["HDF5_PACKAGE_EXTLIBS"] = False
        cmake.definitions["HDF5_ENABLE_THREADSAFE"] = self.options.get_safe("threadsafe", False)
        cmake.definitions["HDF5_ENABLE_DEBUG_APIS"] = False  # Option?
        cmake.definitions["BUILD_TESTING"] = False
        # Headers are nested under include/hdf5 to avoid clashing with other packages.
        cmake.definitions["HDF5_INSTALL_INCLUDE_DIR"] = os.path.join(self.package_folder, "include", "hdf5")
        cmake.definitions["HDF5_BUILD_TOOLS"] = False
        cmake.definitions["HDF5_BUILD_EXAMPLES"] = False
        cmake.definitions["HDF5_BUILD_HL_LIB"] = self.options.hl
        cmake.definitions["HDF5_BUILD_FORTRAN"] = False
        cmake.definitions["HDF5_BUILD_CPP_LIB"] = self.options.enable_cxx
        if tools.Version(self.version) >= "1.10.0":
            cmake.definitions["HDF5_BUILD_JAVA"] = False
        cmake.configure(build_folder=self._build_subfolder)
        return cmake
def _get_szip_lib(self):
return {
"with_libaec": "libaec",
"with_szip": "szip",
}.get(str(self.options.szip_support))
def _components(self):
hdf5_requirements = []
if self.options.with_zlib:
hdf5_requirements.append("zlib::zlib")
if self.options.szip_support == "with_libaec":
hdf5_requirements.append("libaec::libaec")
elif self.options.szip_support == "with_szip":
hdf5_requirements.append("szip::szip")
if self.options.parallel:
hdf5_requirements.append("openmpi::openmpi")
return {
"hdf5_c": {"component": "C", "alias_target": "hdf5", "requirements": hdf5_requirements},
"hdf5_hl": {"component": "HL", "alias_target": "hdf5_hl", "requirements": ["hdf5_c"]},
"hdf5_cpp": {"component": "CXX", "alias_target": "hdf5_cpp", "requirements": ["hdf5_c"]},
"hdf5_hl_cpp": {"component": "HL_CXX", "alias_target": "hdf5_hl_cpp", "requirements": ["hdf5_c", "hdf5_cpp", "hdf5_hl"]},
}
    @staticmethod
    def _create_cmake_module_alias_targets(module_file, targets, is_parallel):
        """Write a CMake module mapping conan targets to official FindHDF5 names.

        :param module_file: absolute path of the .cmake file to write
        :param targets: mapping of alias target name -> existing target name
        :param is_parallel: value emitted for the HDF5_IS_PARALLEL variable
        """
        content = ""
        for alias, aliased in targets.items():
            content += textwrap.dedent("""\
                if(TARGET {aliased} AND NOT TARGET {alias})
                    add_library({alias} INTERFACE IMPORTED)
                    set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
                endif()
            """.format(alias=alias, aliased=aliased))
        # add the additional hdf5_hl_cxx target when both CXX and HL components are specified
        content += textwrap.dedent("""\
            if(TARGET HDF5::HL AND TARGET HDF5::CXX AND NOT TARGET hdf5::hdf5_hl_cpp)
                add_library(hdf5::hdf5_hl_cpp INTERFACE IMPORTED)
                set_property(TARGET hdf5::hdf5_hl_cpp PROPERTY INTERFACE_LINK_LIBRARIES HDF5::HL_CXX)
            endif()
        """)
        content += textwrap.dedent("set(HDF5_IS_PARALLEL {})".format("ON" if is_parallel else "OFF"))
        tools.save(module_file, content)
    @property
    def _module_file_rel_path(self):
        # Package-relative location of the generated CMake alias module.
        return os.path.join("lib", "cmake",
                            "conan-official-{}-targets.cmake".format(self.name))
    def package(self):
        """Install build artifacts, the license, and the CMake alias module;
        remove files conan does not want shipped."""
        self.copy("COPYING", dst="licenses", src=self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
        os.remove(os.path.join(self.package_folder, "lib", "libhdf5.settings"))

        # Mimic the official CMake FindHDF5 targets. HDF5::HDF5 refers to the global target as per conan,
        # but component targets have a lower case namespace prefix. hdf5::hdf5 refers to the C library only
        components = self._components()
        self._create_cmake_module_alias_targets(
            os.path.join(self.package_folder, self._module_file_rel_path),
            {"hdf5::{}".format(component["alias_target"]): "HDF5::{}".format(component["component"]) for component in components.values()},
            self.options.get_safe("parallel", False)
        )
    def package_info(self):
        """Populate cpp_info with per-component libs, targets and requirements."""
        def add_component(component_name, component, alias_target, requirements):
            # Registers one HDF5 component under both the new property-based
            # generators and the legacy cmake_find_package* generators.
            def _config_libname(lib):
                # Reproduce the library naming scheme of the HDF5 build
                # ("lib" prefix for static MSVC builds, debug postfixes).
                if self.settings.os == "Windows" and self.settings.compiler != "gcc" and not self.options.shared:
                    lib = "lib" + lib
                if self.settings.build_type == "Debug":
                    debug_postfix = "_D" if self.settings.os == "Windows" else "_debug"
                    return lib + debug_postfix
                # See config/cmake_ext_mod/HDFMacros.cmake
                return lib
            self.cpp_info.components[component_name].set_property("cmake_target_name", f"hdf5::{alias_target}")
            self.cpp_info.components[component_name].set_property("pkg_config_name", alias_target)
            self.cpp_info.components[component_name].libs = [_config_libname(alias_target)]
            self.cpp_info.components[component_name].requires = requirements
            # TODO: to remove in conan v2 once cmake_find_package_* generators removed
            self.cpp_info.components[component_name].names["cmake_find_package"] = component
            self.cpp_info.components[component_name].names["cmake_find_package_multi"] = component
            self.cpp_info.components[component_name].build_modules["cmake_find_package"] = [self._module_file_rel_path]
            self.cpp_info.components[component_name].build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]

        self.cpp_info.set_property("cmake_find_mode", "both")
        self.cpp_info.set_property("cmake_file_name", "HDF5")
        self.cpp_info.set_property("cmake_target_name", "HDF5::HDF5")
        self.cpp_info.set_property("pkg_config_name", "hdf5-all-do-not-use")  # to avoid conflict with hdf5_c component

        components = self._components()
        # The C component always exists; the others depend on options.
        add_component("hdf5_c", **components["hdf5_c"])
        self.cpp_info.components["hdf5_c"].includedirs.append(os.path.join("include", "hdf5"))
        if self.settings.os == "Linux":
            self.cpp_info.components["hdf5_c"].system_libs.extend(["dl", "m"])
            if self.options.get_safe("threadsafe"):
                self.cpp_info.components["hdf5_c"].system_libs.append("pthread")
        if self.options.shared:
            self.cpp_info.components["hdf5_c"].defines.append("H5_BUILT_AS_DYNAMIC_LIB")
        if self.options.get_safe("enable_cxx"):
            add_component("hdf5_cpp", **components["hdf5_cpp"])
        if self.options.get_safe("hl"):
            add_component("hdf5_hl", **components["hdf5_hl"])
            if self.options.get_safe("enable_cxx"):
                add_component("hdf5_hl_cpp", **components["hdf5_hl_cpp"])

        # TODO: to remove in conan v2 once cmake_find_package_* generators removed
        self.cpp_info.names["cmake_find_package"] = "HDF5"
        self.cpp_info.names["cmake_find_package_multi"] = "HDF5"
| 48.075188 | 139 | 0.64451 | from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import functools
import os
import textwrap
required_conan_version = ">=1.43.0"
class Hdf5Conan(ConanFile):
name = "hdf5"
description = "HDF5 is a data model, library, and file format for storing and managing data."
license = "BSD-3-Clause"
topics = ("hdf5", "hdf", "data")
homepage = "https://portal.hdfgroup.org/display/HDF5/HDF5"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"enable_cxx": [True, False],
"hl": [True, False],
"threadsafe": [True, False],
"with_zlib": [True, False],
"szip_support": [None, "with_libaec", "with_szip"],
"szip_encoding": [True, False],
"parallel": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"enable_cxx": True,
"hl": True,
"threadsafe": False,
"with_zlib": True,
"szip_support": None,
"szip_encoding": False,
"parallel": False,
}
generators = "cmake"
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def export_sources(self):
self.copy("CMakeLists.txt")
for patch in self.conan_data.get("patches", {}).get(self.version, []):
self.copy(patch["patch_file"])
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
if not self.options.enable_cxx:
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
if self.options.enable_cxx or self.options.hl or (self.settings.os == "Windows" and not self.options.shared):
del self.options.threadsafe
if not bool(self.options.szip_support):
del self.options.szip_encoding
def requirements(self):
if self.options.with_zlib:
self.requires("zlib/1.2.12")
if self.options.szip_support == "with_libaec":
self.requires("libaec/1.0.6")
elif self.options.szip_support == "with_szip":
self.requires("szip/2.1.1")
if self.options.parallel:
self.requires("openmpi/4.1.0")
def validate(self):
if hasattr(self, "settings_build") and tools.cross_building(self, skip_x64_x86=True):
raise ConanInvalidConfiguration("Current recipe doesn't support cross-building (yet)")
if self.options.parallel:
if self.options.enable_cxx:
raise ConanInvalidConfiguration("Parallel and C++ options are mutually exclusive")
if self.options.get_safe("threadsafe", False):
raise ConanInvalidConfiguration("Parallel and Threadsafe options are mutually exclusive")
if self.options.szip_support == "with_szip" and self.options.szip_encoding and \
not self.options["szip"].enable_encoding:
raise ConanInvalidConfiguration("encoding must be enabled in szip dependency (szip:enable_encoding=True)")
def source(self):
tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
def build(self):
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
# Do not force PIC
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
"set (CMAKE_POSITION_INDEPENDENT_CODE ON)", "")
@functools.lru_cache(1)
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["HDF5_EXTERNALLY_CONFIGURED"] = True
cmake.definitions["HDF5_EXTERNAL_LIB_PREFIX"] = ""
cmake.definitions["HDF5_USE_FOLDERS"] = False
cmake.definitions["HDF5_NO_PACKAGES"] = True
cmake.definitions["ALLOW_UNSUPPORTED"] = False
if tools.Version(self.version) >= "1.10.6":
cmake.definitions["ONLY_SHARED_LIBS"] = self.options.shared
cmake.definitions["BUILD_STATIC_EXECS"] = False
cmake.definitions["HDF5_ENABLE_COVERAGE"] = False
cmake.definitions["HDF5_ENABLE_USING_MEMCHECKER"] = False
if tools.Version(self.version) >= "1.10.0":
cmake.definitions["HDF5_MEMORY_ALLOC_SANITY_CHECK"] = False
if tools.Version(self.version) >= "1.10.5":
cmake.definitions["HDF5_ENABLE_PREADWRITE"] = True
cmake.definitions["HDF5_ENABLE_DEPRECATED_SYMBOLS"] = True
cmake.definitions["HDF5_BUILD_GENERATORS"] = False
cmake.definitions["HDF5_ENABLE_TRACE"] = False
if self.settings.build_type == "Debug":
cmake.definitions["HDF5_ENABLE_INSTRUMENT"] = False # Option?
cmake.definitions["HDF5_ENABLE_PARALLEL"] = self.options.parallel
cmake.definitions["HDF5_ENABLE_Z_LIB_SUPPORT"] = self.options.with_zlib
cmake.definitions["HDF5_ENABLE_SZIP_SUPPORT"] = bool(self.options.szip_support)
if bool(self.options.szip_support):
cmake.definitions["CONAN_SZIP_LIBNAME"] = self._get_szip_lib() # this variable is added by conanize-link-szip*.patch
cmake.definitions["HDF5_ENABLE_SZIP_ENCODING"] = self.options.get_safe("szip_encoding", False)
cmake.definitions["HDF5_PACKAGE_EXTLIBS"] = False
cmake.definitions["HDF5_ENABLE_THREADSAFE"] = self.options.get_safe("threadsafe", False)
cmake.definitions["HDF5_ENABLE_DEBUG_APIS"] = False # Option?
cmake.definitions["BUILD_TESTING"] = False
cmake.definitions["HDF5_INSTALL_INCLUDE_DIR"] = os.path.join(self.package_folder, "include", "hdf5")
cmake.definitions["HDF5_BUILD_TOOLS"] = False
cmake.definitions["HDF5_BUILD_EXAMPLES"] = False
cmake.definitions["HDF5_BUILD_HL_LIB"] = self.options.hl
cmake.definitions["HDF5_BUILD_FORTRAN"] = False
cmake.definitions["HDF5_BUILD_CPP_LIB"] = self.options.enable_cxx
if tools.Version(self.version) >= "1.10.0":
cmake.definitions["HDF5_BUILD_JAVA"] = False
cmake.configure(build_folder=self._build_subfolder)
return cmake
def _get_szip_lib(self):
return {
"with_libaec": "libaec",
"with_szip": "szip",
}.get(str(self.options.szip_support))
def _components(self):
hdf5_requirements = []
if self.options.with_zlib:
hdf5_requirements.append("zlib::zlib")
if self.options.szip_support == "with_libaec":
hdf5_requirements.append("libaec::libaec")
elif self.options.szip_support == "with_szip":
hdf5_requirements.append("szip::szip")
if self.options.parallel:
hdf5_requirements.append("openmpi::openmpi")
return {
"hdf5_c": {"component": "C", "alias_target": "hdf5", "requirements": hdf5_requirements},
"hdf5_hl": {"component": "HL", "alias_target": "hdf5_hl", "requirements": ["hdf5_c"]},
"hdf5_cpp": {"component": "CXX", "alias_target": "hdf5_cpp", "requirements": ["hdf5_c"]},
"hdf5_hl_cpp": {"component": "HL_CXX", "alias_target": "hdf5_hl_cpp", "requirements": ["hdf5_c", "hdf5_cpp", "hdf5_hl"]},
}
@staticmethod
def _create_cmake_module_alias_targets(module_file, targets, is_parallel):
content = ""
for alias, aliased in targets.items():
content += textwrap.dedent("""\
if(TARGET {aliased} AND NOT TARGET {alias})
add_library({alias} INTERFACE IMPORTED)
set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
endif()
""".format(alias=alias, aliased=aliased))
# add the additional hdf5_hl_cxx target when both CXX and HL components are specified
content += textwrap.dedent("""\
if(TARGET HDF5::HL AND TARGET HDF5::CXX AND NOT TARGET hdf5::hdf5_hl_cpp)
add_library(hdf5::hdf5_hl_cpp INTERFACE IMPORTED)
set_property(TARGET hdf5::hdf5_hl_cpp PROPERTY INTERFACE_LINK_LIBRARIES HDF5::HL_CXX)
endif()
""")
content += textwrap.dedent("set(HDF5_IS_PARALLEL {})".format("ON" if is_parallel else "OFF"))
tools.save(module_file, content)
@property
def _module_file_rel_path(self):
return os.path.join("lib", "cmake",
"conan-official-{}-targets.cmake".format(self.name))
def package(self):
self.copy("COPYING", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
os.remove(os.path.join(self.package_folder, "lib", "libhdf5.settings"))
# Mimic the official CMake FindHDF5 targets. HDF5::HDF5 refers to the global target as per conan,
# but component targets have a lower case namespace prefix. hdf5::hdf5 refers to the C library only
components = self._components()
self._create_cmake_module_alias_targets(
os.path.join(self.package_folder, self._module_file_rel_path),
{"hdf5::{}".format(component["alias_target"]): "HDF5::{}".format(component["component"]) for component in components.values()},
self.options.get_safe("parallel", False)
)
def package_info(self):
def add_component(component_name, component, alias_target, requirements):
def _config_libname(lib):
if self.settings.os == "Windows" and self.settings.compiler != "gcc" and not self.options.shared:
lib = "lib" + lib
if self.settings.build_type == "Debug":
debug_postfix = "_D" if self.settings.os == "Windows" else "_debug"
return lib + debug_postfix
# See config/cmake_ext_mod/HDFMacros.cmake
return lib
self.cpp_info.components[component_name].set_property("cmake_target_name", f"hdf5::{alias_target}")
self.cpp_info.components[component_name].set_property("pkg_config_name", alias_target)
self.cpp_info.components[component_name].libs = [_config_libname(alias_target)]
self.cpp_info.components[component_name].requires = requirements
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.components[component_name].names["cmake_find_package"] = component
self.cpp_info.components[component_name].names["cmake_find_package_multi"] = component
self.cpp_info.components[component_name].build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.components[component_name].build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
self.cpp_info.set_property("cmake_find_mode", "both")
self.cpp_info.set_property("cmake_file_name", "HDF5")
self.cpp_info.set_property("cmake_target_name", "HDF5::HDF5")
self.cpp_info.set_property("pkg_config_name", "hdf5-all-do-not-use") # to avoid conflict with hdf5_c component
components = self._components()
add_component("hdf5_c", **components["hdf5_c"])
self.cpp_info.components["hdf5_c"].includedirs.append(os.path.join("include", "hdf5"))
if self.settings.os == "Linux":
self.cpp_info.components["hdf5_c"].system_libs.extend(["dl", "m"])
if self.options.get_safe("threadsafe"):
self.cpp_info.components["hdf5_c"].system_libs.append("pthread")
if self.options.shared:
self.cpp_info.components["hdf5_c"].defines.append("H5_BUILT_AS_DYNAMIC_LIB")
if self.options.get_safe("enable_cxx"):
add_component("hdf5_cpp", **components["hdf5_cpp"])
if self.options.get_safe("hl"):
add_component("hdf5_hl", **components["hdf5_hl"])
if self.options.get_safe("enable_cxx"):
add_component("hdf5_hl_cpp", **components["hdf5_hl_cpp"])
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.names["cmake_find_package"] = "HDF5"
self.cpp_info.names["cmake_find_package_multi"] = "HDF5"
| true | true |
f71409decc2bf9a049085abd9f2ece5b6c0c08de | 1,143 | py | Python | event_sourced_bank/domain_model.py | sfinnie/event_sourced_bank | dcec864724e85ac8049a377ee2f6e2c05b1b6a2c | [
"MIT"
] | 5 | 2022-02-17T05:54:49.000Z | 2022-03-09T20:13:21.000Z | event_sourced_bank/domain_model.py | sfinnie/event_sourced_bank | dcec864724e85ac8049a377ee2f6e2c05b1b6a2c | [
"MIT"
] | 2 | 2022-02-17T14:19:16.000Z | 2022-02-17T14:21:31.000Z | event_sourced_bank/domain_model.py | sfinnie/event_sourced_bank | dcec864724e85ac8049a377ee2f6e2c05b1b6a2c | [
"MIT"
] | 1 | 2022-03-03T05:03:23.000Z | 2022-03-03T05:03:23.000Z | from eventsourcing.domain import Aggregate, event
from uuid import uuid5, NAMESPACE_URL
class Account(Aggregate):
    """A minimal event-sourced bank account.

    Every state change is recorded as a named domain event; the balance
    is rebuilt by replaying those events.
    """

    @event('Created')
    def __init__(self):
        # Accounts always open with a zero balance.
        self.balance = 0

    @event('Credited')
    def credit(self, amount: int):
        """Increase the balance by ``amount``."""
        self.balance = self.balance + amount

    @event('Debited')
    def debit(self, amount: int):
        """Decrease the balance by ``amount`` (no overdraft check here)."""
        self.balance = self.balance - amount
class Ledger(Aggregate):
    """A simple ledger tracking net movements across all accounts."""

    def __init__(self, name):
        self.name = name
        self.transaction_count = 0
        self.balance = 0

    @classmethod
    def create_id(cls, name):
        """Enable predictable IDs so that a Ledger can be retrieved
        using its name - even if its ID isn't known
        """
        return uuid5(NAMESPACE_URL, f'/ledgers/{name}')

    @event('TransactionAdded')
    def add_transaction(self, amount: int):
        """Record one net movement of ``amount`` against the ledger."""
        self.transaction_count = self.transaction_count + 1
        self.balance = self.balance + amount

    def get_balance(self):
        """Current net balance across all recorded transactions."""
        return self.balance

    def get_transaction_count(self):
        """Number of transactions recorded so far."""
        return self.transaction_count
| 25.4 | 78 | 0.637795 | from eventsourcing.domain import Aggregate, event
from uuid import uuid5, NAMESPACE_URL
class Account(Aggregate):
@event('Created')
def __init__(self):
self.balance = 0
@event('Credited')
def credit(self, amount: int):
self.balance += amount
@event('Debited')
def debit(self, amount: int):
self.balance -= amount
class Ledger(Aggregate):
def __init__(self, name):
self.name = name
self.transaction_count = 0
self.balance = 0
@classmethod
def create_id(cls, name):
return uuid5(NAMESPACE_URL, f'/ledgers/{name}')
@event('TransactionAdded')
def add_transaction(self, amount: int):
self.transaction_count += 1
self.balance += amount
def get_balance(self):
return self.balance
def get_transaction_count(self):
return self.transaction_count
| true | true |
f71409fcadaa0c4d31744b8e38f76955efffacf1 | 14,688 | py | Python | notion_export_enhancer/enhancer.py | Cobertos/notion-export-enhancer | 07a34d4b3daeb1ec69cd4253c089ba4d9dc5bbc3 | [
"MIT"
] | 17 | 2021-01-28T11:03:47.000Z | 2022-02-10T22:51:44.000Z | notion_export_enhancer/enhancer.py | Cobertos/notion-export-enhancer | 07a34d4b3daeb1ec69cd4253c089ba4d9dc5bbc3 | [
"MIT"
] | 3 | 2021-03-21T02:35:24.000Z | 2021-10-05T07:10:22.000Z | notion_export_enhancer/enhancer.py | Cobertos/notion-export-enhancer | 07a34d4b3daeb1ec69cd4253c089ba4d9dc5bbc3 | [
"MIT"
] | 4 | 2021-03-21T03:39:36.000Z | 2022-01-19T07:05:14.000Z | """
Takes a [Notion.so](https://notion.so) export .zip and enhances it
"""
import tempfile
import sys
import os
import time
import re
import argparse
import zipfile
import urllib.parse
from datetime import datetime
from pathlib import Path
import backoff
import requests
from emoji_extractor.extract import Extractor as EmojiExtractor
from notion.client import NotionClient
from notion.block import PageBlock
def noteNameRewrite(nCl, originalNameNoExt):
    """
    Takes original name (with no extension) and renames it using the Notion ID
    and data from Notion itself
    * Removes the Notion ID
    * Looks up the Notion ID for it's icon, and appends if we can find it

    @param {NotionClient} nCl Client used to look up the page on Notion
    @param {string} originalNameNoExt Exported basename without its extension
    @returns {tuple} (newName, createdTime, lastEditedTime); (None, None, None)
    when no rewrite could be performed
    """
    # Notion appends a 32-hex-char page ID to every exported name.
    match = re.search(r"(.+?) ([0-9a-f]{32})$", originalNameNoExt)
    if not match:
        return (None, None, None)
    notionId = match[2]

    # Query notion for the ID
    #print(f"Fetching Notion ID '{notionId}' for '{originalNameNoExt}'")
    try:
        pageBlock = nCl.get_block(notionId)
    except requests.exceptions.HTTPError:
        print(f"Failed to retrieve ID {notionId}")
        return (None, None, None)
    # The ID might not be a PageBlock (like when a note with no child PageBlocks
    # has an image in it, generating a folder, Notion uses the ID of the first
    # ImageBlock, maybe a bug on Notion's end? lol)
    if not isinstance(pageBlock, PageBlock):
        print(f"Block at ID {notionId}, was not PageBlock. Was {type(pageBlock).__name__}")
        if hasattr(pageBlock, 'parent') and pageBlock.parent is not None:
            # Try traversing up the parents for the first page
            while hasattr(pageBlock, 'parent') and not isinstance(pageBlock, PageBlock):
                pageBlock = pageBlock.parent
            if isinstance(pageBlock, PageBlock):
                print(f"Using some .parent as PageBlock")
        elif hasattr(pageBlock, 'children') and pageBlock.children is not None:
            # Try to find a PageBlock in the children, but only use if one single one exists
            pageBlockChildren = [c for c in pageBlock.children if isinstance(c, PageBlock)]
            if len(pageBlockChildren) != 1:
                print(f"Ambiguous .children, contained {len(pageBlockChildren)} chlidren PageBlocks")
            else:
                print(f"Using .children[0] as PageBlock")
                pageBlock = pageBlockChildren[0]
        # Fallbacks exhausted: give up if we still don't have a page.
        if not isinstance(pageBlock, PageBlock):
            print(f"Failed to retrieve PageBlock for ID {notionId}")
            return (None, None, None)
        #print(f"Found parent '{type(pageBlock).__name__}' instead")

    # Check for name truncation
    # (Notion truncates exported names to 50 characters.)
    newName = match[1]
    if len(match[1]) == 50:
        # Use full name instead, invalids replaced with " ", like the normal export
        # TODO: These are just Windows reserved characters
        # TODO: 200 was just a value to stop Windows from complaining
        newName = re.sub(r"[\\/?:*\"<>|]", " ", pageBlock.title)
        if len(newName) > 200:
            print(f"'{newName}' too long, truncating to 200")
            newName = newName[0:200]

    # Add icon to the front if it's there and usable
    icon = pageBlock.icon
    if icon and EmojiExtractor().big_regex.match(icon): # A full match of a single emoji, might be None or an https://aws.amazon uploaded icon
        newName = f"{icon} {newName}"

    # Also get the times to set the file to
    # (Notion stores these as millisecond epoch timestamps.)
    createdTime = datetime.fromtimestamp(int(pageBlock._get_record_data()["created_time"])/1000)
    lastEditedTime = datetime.fromtimestamp(int(pageBlock._get_record_data()["last_edited_time"])/1000)
    return (newName, createdTime, lastEditedTime)
class NotionExportRenamer:
    """
    Holds state information for renaming a single Notion.so export. Allows it to avoid
    naming collisions and store other state
    """
    def __init__(self, notionClient, rootPath):
        # Client used to query Notion for page titles/icons/timestamps.
        self.notionClient = notionClient
        # Root of the extracted export on disk; all relative paths are rooted here.
        self.rootPath = rootPath
        # Dict containing all the paths we've renamed and what they were renamed to
        # (plus createdtime and lastEditedTime). Strings with relative directories to
        # rootPath mapped to 3 tuples returned from noteNameRewrite
        self._renameCache = {}
        # Dict containing keys where it is an unrenamed path with the last part being
        # renamed mapped to True. Used to see if other files in the folder might
        # have the same name and to act accordingly
        self._collisionCache = {}

    def renameAndTimesWithNotion(self, pathToRename):
        """
        Takes an original on file-system path and rewrites _just the basename_. It
        collects rename operations for speed and collision prevention (as some renames
        will cause the same name to occur)
        @param {string} pathToRename The path to rename the basename of. Must point to an
        actual unrenamed file/folder on disk rooted at self.rootPath so we can scan around it
        @returns {tuple} 3 tuple of new name, created time and modified time
        """
        if pathToRename in self._renameCache:
            return self._renameCache[pathToRename]
        path, name = os.path.split(pathToRename)
        nameNoExt, ext = os.path.splitext(name)
        newNameNoExt, createdTime, lastEditedTime = noteNameRewrite(self.notionClient, nameNoExt)
        if not newNameNoExt: # No rename happened, probably no ID in the name or not an .md file
            self._renameCache[pathToRename] = (name, None, None)
        else:
            # Merge files into folders in path at same name if that folder exists
            if ext == '.md':
                p = Path(os.path.join(self.rootPath, path, nameNoExt))
                if p.exists() and p.is_dir():
                    # NOTE: newNameNoExt can contain a '/' for path joining later!
                    # "!index" sorts to the top of the folder it is merged into.
                    newNameNoExt = os.path.join(newNameNoExt, "!index")

            # Check to see if name collides
            if os.path.join(path, newNameNoExt) in self._collisionCache:
                # If it does, try progressive (i) until a new one is found
                i = 1
                collidingNameNoExt = newNameNoExt
                while os.path.join(path, newNameNoExt) in self._collisionCache:
                    newNameNoExt = f"{collidingNameNoExt} ({i})"
                    i += 1
            self._renameCache[pathToRename] = (f"{newNameNoExt}{ext}", createdTime, lastEditedTime)
            # Reserve the chosen name so later siblings cannot take it.
            self._collisionCache[os.path.join(path, newNameNoExt)] = True
        return self._renameCache[pathToRename]

    def renameWithNotion(self, pathToRename):
        """
        Takes an original on file-system path and rewrites _just the basename_. It
        collects rename operations for speed and collision prevention (as some renames
        will cause the same name to occur)
        @param {string} pathToRename The path to rename the basename of. Must point to an
        actual unrenamed file/folder on disk rooted at self.rootPath so we can scan around it
        @returns {string} The new name
        """
        return self.renameAndTimesWithNotion(pathToRename)[0]

    def renamePathWithNotion(self, pathToRename):
        """
        Renames all parts of a path
        @param {string} pathToRename A real path on disk to a file or folder root at
        self.rootPath. All pieces of the path will be renamed
        @returns {string} The path with every component renamed
        """
        pathToRenameSplit = re.split(r"[\\/]", pathToRename)
        # Build the list of progressively longer prefixes so each ancestor is
        # renamed in its own (cached) call.
        paths = [os.path.join(*pathToRenameSplit[0:rpc + 1]) for rpc in range(len(pathToRenameSplit))]
        return os.path.join(*[self.renameWithNotion(rp) for rp in paths])

    def renamePathAndTimesWithNotion(self, pathToRename):
        """
        Renames all parts of a path and return the created and lastEditedTime for the last
        part of the path (the file)
        @param {string} pathToRename A real path on disk to a file or folder root at
        self.rootPath. All pieces of the path will be renamed
        @returns {tuple} (renamed full path, createdTime, lastEditedTime)
        """
        newPath = self.renamePathWithNotion(os.path.dirname(pathToRename))
        newName, createdTime, lastEditedTime = self.renameAndTimesWithNotion(pathToRename)
        return (os.path.join(newPath, newName), createdTime, lastEditedTime)
def mdFileRewrite(renamer, mdFilePath, mdFileContents=None, removeTopH1=False, rewritePaths=False):
    """
    Takes a Notion exported md file and rewrites parts of it
    @param {NotionExportRenamer} renamer Renamer for this export, used to resolve
    renamed link targets
    @param {string} mdFilePath String to the markdown file that's being editted, rooted at
    self.rootPath
    @param {string} [mdFileContents=None] The contents of the markdown file, if not provided
    we will read it manually
    @param {boolean} [removeTopH1=False] Remove the title on the first line of the MD file?
    @param {boolean} [rewritePaths=False] Rewrite the relative paths in the MD file (images and links)
    using Notion file name rewriting
    @returns {string} The rewritten markdown contents
    @raises NotImplementedError if mdFileContents is not provided
    """
    if not mdFileContents:
        raise NotImplementedError("TODO: Not passing mdFileContents is not implemented... please pass it ;w;")

    newMDFileContents = mdFileContents
    if removeTopH1:
        # The exported title is always the first line; drop it.
        lines = mdFileContents.split("\n")
        newMDFileContents = "\n".join(lines[1:])

    if rewritePaths:
        # Notion link/images use relative paths to other notes, which we can't known without
        # consulting the file tree and renaming (to handle duplicates and such)
        # Notion links are also URL encoded

        # Can't use finditer because we modify the string each time...
        searchStartIndex = 0
        while True:
            # Matches markdown links/images and captures the URL portion.
            m = re.search(r"!?\[.+?\]\(([\w\d\-._~:/?=#%\]\[@!$&'\(\)*+,;]+?)\)", newMDFileContents[searchStartIndex:])
            if not m:
                break
            if re.search(r":/", m.group(1)):
                # Has a scheme (http://, https://, ...); skip past it.
                searchStartIndex = searchStartIndex + m.end(1)
                continue # Not a local file path
            relTargetFilePath = urllib.parse.unquote(m.group(1))
            # Convert the current MD file path and link target path to the renamed version
            # (also taking into account potentially mdFilePath renames moving the directory)
            mdDirPath = os.path.dirname(mdFilePath)
            newTargetFilePath = renamer.renamePathWithNotion(os.path.join(mdDirPath, relTargetFilePath))
            newMDDirPath = os.path.dirname(renamer.renamePathWithNotion(mdFilePath))
            # Find the relative path to the newly converted paths for both files
            newRelTargetFilePath = os.path.relpath(newTargetFilePath, newMDDirPath)
            # Convert back to the way markdown expects the link to be
            newRelTargetFilePath = re.sub(r"\\", "/", newRelTargetFilePath)
            newRelTargetFilePath = urllib.parse.quote(newRelTargetFilePath)
            # Replace the path in the original string with the new relative renamed
            # target path (match offsets are relative to searchStartIndex).
            newMDFileContents = newMDFileContents[0:m.start(1) + searchStartIndex] + newRelTargetFilePath + newMDFileContents[m.end(1) + searchStartIndex:]
            searchStartIndex = searchStartIndex + m.start(1) + len(newRelTargetFilePath)
    return newMDFileContents
def rewriteNotionZip(notionClient, zipPath, outputPath=".", removeTopH1=False, rewritePaths=True):
    """
    Takes a Notion .zip and prettifies the whole thing
    * Removes all Notion IDs from end of names, folders and files
    * Add icon to the start of folder/file name if Unicode character
    * For files had content in Notion, move them inside the folder, and set the
    name to something that will sort to the top
    * Fix links inside of files
    * Optionally remove titles at the tops of files

    @param {NotionClient} notionClient The NotionClient to use to query Notion with
    @param {string} zipPath The path to the Notion zip
    @param {string} [outputPath="."] Optional output path, otherwise will use cwd
    @param {boolean} [removeTopH1=False] To remove titles at the top of all the md files
    @param {boolean} [rewritePaths=True] To rewrite all the links and images in the Markdown files too
    @returns {string} Path to the output zip file
    """
    with tempfile.TemporaryDirectory() as tmpDir:
        # Unpack the whole thing first (probably faster than traversing it zipped, like with tar files)
        print(f"Extracting '{zipPath}' temporarily...")
        with zipfile.ZipFile(zipPath) as zf:
            zf.extractall(tmpDir)

        # Make new zip to begin filling
        zipName = os.path.basename(zipPath)
        newZipName = f"{zipName}.formatted"
        newZipPath = os.path.join(outputPath, newZipName)
        with zipfile.ZipFile(newZipPath, 'w', zipfile.ZIP_DEFLATED) as zf:
            #Traverse over the files, renaming, modifying, and rewriting back to the zip
            renamer = NotionExportRenamer(notionClient, tmpDir)
            for tmpWalkDir, dirs, files in os.walk(tmpDir):
                walkDir = os.path.relpath(tmpWalkDir, tmpDir)
                for name in files:
                    realPath = os.path.join(tmpWalkDir, name)
                    relPath = os.path.join("" if walkDir == "." else walkDir, name) # Prevent paths starting with .\\ which, when written to the tar, do annoying things

                    # print(f"Reading '{root}' '{name}'")
                    # Rewrite the current path and get the times from Notion
                    print("---")
                    print(f"Working on '{relPath}'")
                    newPath, createdTime, lastEditedTime = renamer.renamePathAndTimesWithNotion(relPath)
                    if os.path.splitext(name)[1] == ".md":
                        # Grab the data from the file if md file
                        with open(realPath, "r", encoding='utf-8') as f:
                            mdFileData = f.read()
                        mdFileData = mdFileRewrite(renamer, relPath, mdFileContents=mdFileData, removeTopH1=removeTopH1, rewritePaths=rewritePaths)

                        # Stamp the zip entry with Notion's last-edited time.
                        print(f"Writing as '{newPath}' with time '{lastEditedTime}'")
                        zi = zipfile.ZipInfo(newPath, lastEditedTime.timetuple())
                        zf.writestr(zi, mdFileData)
                    else:
                        print(f"Writing as '{newPath}' with time from original export (not an .md file)")
                        zf.write(realPath, newPath)
        return newZipPath
def cli(argv):
    """
    CLI entrypoint, takes CLI arguments array (e.g. ``sys.argv[1:]``).
    """
    parser = argparse.ArgumentParser(description='Prettifies Notion .zip exports')
    parser.add_argument('token_v2', type=str,
                        help='the token for your Notion.so session')
    parser.add_argument('zip_path', type=str,
                        help='the path to the Notion exported .zip file')
    parser.add_argument('--output-path', action='store', type=str, default=".",
                        help='The path to output to, defaults to cwd')
    parser.add_argument('--remove-title', action='store_true',
                        help='Removes the title that Notion adds. H1s at the top of every file')
    # FIX: action='store_false' means passing this flag sets rewrite_paths to
    # False, i.e. it DISABLES rewriting; the old help text claimed the opposite.
    parser.add_argument('--rewrite-paths', action='store_false', default=True,
                        help='Disable rewriting the paths in the Markdown files to match the file renaming (rewriting is on by default)')
    args = parser.parse_args(argv)
    startTime = time.time()
    nCl = NotionClient(token_v2=args.token_v2)
    # Retry Notion block fetches with exponential backoff on transient HTTP errors
    nCl.get_block = backoff.on_exception(backoff.expo,
                                         requests.exceptions.HTTPError,
                                         max_tries=5,
                                         )(nCl.get_block)
    outFileName = rewriteNotionZip(nCl, args.zip_path, outputPath=args.output_path,
                                   removeTopH1=args.remove_title, rewritePaths=args.rewrite_paths)
    print("--- Finished in %s seconds ---" % (time.time() - startTime))
    print(f"Output file written as '{outFileName}'")
if __name__ == "__main__":
    cli(sys.argv[1:])
| 45.757009 | 158 | 0.701593 |
import tempfile
import sys
import os
import time
import re
import argparse
import zipfile
import urllib.parse
from datetime import datetime
from pathlib import Path
import backoff
import requests
from emoji_extractor.extract import Extractor as EmojiExtractor
from notion.client import NotionClient
from notion.block import PageBlock
def noteNameRewrite(nCl, originalNameNoExt):
    """Compute the prettified name and timestamps for one exported entry.

    Takes a file/folder name (without extension) ending in a 32-hex Notion ID,
    resolves the block through the Notion client, and returns a tuple of
    (newName, createdTime, lastEditedTime). Returns (None, None, None) when the
    name carries no ID or the block cannot be resolved to a PageBlock.
    """
    match = re.search(r"(.+?) ([0-9a-f]{32})$", originalNameNoExt)
    if not match:
        return (None, None, None)
    notionId = match[2]
    try:
        pageBlock = nCl.get_block(notionId)
    except requests.exceptions.HTTPError:
        print(f"Failed to retrieve ID {notionId}")
        return (None, None, None)
    if not isinstance(pageBlock, PageBlock):
        print(f"Block at ID {notionId}, was not PageBlock. Was {type(pageBlock).__name__}")
        if hasattr(pageBlock, 'parent') and pageBlock.parent is not None:
            # Try traversing up the parents for the first page
            while hasattr(pageBlock, 'parent') and not isinstance(pageBlock, PageBlock):
                pageBlock = pageBlock.parent
            if isinstance(pageBlock, PageBlock):
                print(f"Using some .parent as PageBlock")
        elif hasattr(pageBlock, 'children') and pageBlock.children is not None:
            # Try to find a PageBlock in the children, but only use it if exactly one exists
            pageBlockChildren = [c for c in pageBlock.children if isinstance(c, PageBlock)]
            if len(pageBlockChildren) != 1:
                print(f"Ambiguous .children, contained {len(pageBlockChildren)} chlidren PageBlocks")
            else:
                print(f"Using .children[0] as PageBlock")
                pageBlock = pageBlockChildren[0]
        if not isinstance(pageBlock, PageBlock):
            print(f"Failed to retrieve PageBlock for ID {notionId}")
            return (None, None, None)
    # Check for name truncation (the export clips the name; len == 50 suggests it was cut)
    newName = match[1]
    if len(match[1]) == 50:
        # Use full Notion title instead, invalid (Windows-reserved) characters
        # replaced with " ", like the normal export does
        newName = re.sub(r"[\\/?:*\"<>|]", " ", pageBlock.title)
        if len(newName) > 200:
            # 200 is just a cap to keep Windows path-length limits happy
            print(f"'{newName}' too long, truncating to 200")
            newName = newName[0:200]
    # Add icon to the front if it is a single emoji (uploaded image icons are skipped)
    icon = pageBlock.icon
    if icon and EmojiExtractor().big_regex.match(icon):
        newName = f"{icon} {newName}"
    # Notion records store millisecond epoch timestamps
    createdTime = datetime.fromtimestamp(int(pageBlock._get_record_data()["created_time"])/1000)
    lastEditedTime = datetime.fromtimestamp(int(pageBlock._get_record_data()["last_edited_time"])/1000)
    return (newName, createdTime, lastEditedTime)
class NotionExportRenamer:
    """Renames paths from a Notion export, caching lookups and resolving name collisions."""
    def __init__(self, notionClient, rootPath):
        self.notionClient = notionClient
        self.rootPath = rootPath
        # Dict containing all the paths we've renamed and what they were renamed to
        # (plus createdTime and lastEditedTime). Strings with relative directories to
        # rootPath mapped to 3-tuples returned from noteNameRewrite
        self._renameCache = {}
        # Dict containing keys where it is an unrenamed path with the last part being
        # renamed mapped to True. Used to see if other files in the folder might
        # have the same name and to act accordingly
        self._collisionCache = {}
    def renameAndTimesWithNotion(self, pathToRename):
        """Rename the last path component, returning (newName, createdTime, lastEditedTime)."""
        if pathToRename in self._renameCache:
            return self._renameCache[pathToRename]
        path, name = os.path.split(pathToRename)
        nameNoExt, ext = os.path.splitext(name)
        newNameNoExt, createdTime, lastEditedTime = noteNameRewrite(self.notionClient, nameNoExt)
        if not newNameNoExt:  # No rename happened, probably no Notion ID in the name
            self._renameCache[pathToRename] = (name, None, None)
        else:
            # Merge files into folders in path at same name if that folder exists
            if ext == '.md':
                p = Path(os.path.join(self.rootPath, path, nameNoExt))
                if p.exists() and p.is_dir():
                    # NOTE: newNameNoExt can contain a '/' for path joining later!
                    newNameNoExt = os.path.join(newNameNoExt, "!index")
            # Check to see if name collides
            if os.path.join(path, newNameNoExt) in self._collisionCache:
                # If it does, try progressive (i) until a new one is found
                i = 1
                collidingNameNoExt = newNameNoExt
                while os.path.join(path, newNameNoExt) in self._collisionCache:
                    newNameNoExt = f"{collidingNameNoExt} ({i})"
                    i += 1
            self._renameCache[pathToRename] = (f"{newNameNoExt}{ext}", createdTime, lastEditedTime)
            self._collisionCache[os.path.join(path, newNameNoExt)] = True
        return self._renameCache[pathToRename]
    def renameWithNotion(self, pathToRename):
        """Rename the last path component, returning only the new name."""
        return self.renameAndTimesWithNotion(pathToRename)[0]
    def renamePathWithNotion(self, pathToRename):
        """Rename every component of pathToRename, returning the fully renamed path."""
        pathToRenameSplit = re.split(r"[\\/]", pathToRename)
        # Rename each prefix of the path, then join the per-component results
        paths = [os.path.join(*pathToRenameSplit[0:rpc + 1]) for rpc in range(len(pathToRenameSplit))]
        return os.path.join(*[self.renameWithNotion(rp) for rp in paths])
    def renamePathAndTimesWithNotion(self, pathToRename):
        """Rename a whole path, returning (newPath, createdTime, lastEditedTime)."""
        newPath = self.renamePathWithNotion(os.path.dirname(pathToRename))
        newName, createdTime, lastEditedTime = self.renameAndTimesWithNotion(pathToRename)
        return (os.path.join(newPath, newName), createdTime, lastEditedTime)
def mdFileRewrite(renamer, mdFilePath, mdFileContents=None, removeTopH1=False, rewritePaths=False):
    """Rewrite the contents of a single exported Markdown file.

    Args:
        renamer: NotionExportRenamer used to map export paths to their renamed
            counterparts (only consulted when ``rewritePaths`` is True).
        mdFilePath: Path of the Markdown file inside the export tree.
        mdFileContents: The file's current text. Required; reading the file
            from disk is not implemented.
        removeTopH1: Drop the first line (the H1 title Notion prepends).
        rewritePaths: Rewrite relative link/image targets to the renamed paths.

    Returns:
        The rewritten Markdown text.
    """
    # FIX: the old `if not mdFileContents` also rejected a legitimately empty
    # file; only a missing argument should raise.
    if mdFileContents is None:
        raise NotImplementedError("TODO: Not passing mdFileContents is not implemented... please pass it ;w;")
    newMDFileContents = mdFileContents
    if removeTopH1:
        lines = mdFileContents.split("\n")
        newMDFileContents = "\n".join(lines[1:])
    if rewritePaths:
        # Notion link/images use URL-encoded relative paths to other notes,
        # which we can't know without consulting the renamer. The string is
        # modified as we go, so track a manual scan offset instead of finditer.
        searchStartIndex = 0
        while True:
            m = re.search(r"!?\[.+?\]\(([\w\d\-._~:/?=#%\]\[@!$&'\(\)*+,;]+?)\)", newMDFileContents[searchStartIndex:])
            if not m:
                break
            if re.search(r":/", m.group(1)):
                searchStartIndex = searchStartIndex + m.end(1)
                continue  # Not a local file path (e.g. an https:// URL)
            relTargetFilePath = urllib.parse.unquote(m.group(1))
            # Convert the current MD file path and link target path to the renamed
            # versions (taking into account renames moving the directory)
            mdDirPath = os.path.dirname(mdFilePath)
            newTargetFilePath = renamer.renamePathWithNotion(os.path.join(mdDirPath, relTargetFilePath))
            newMDDirPath = os.path.dirname(renamer.renamePathWithNotion(mdFilePath))
            # Relative path between the two renamed locations, in Markdown form
            newRelTargetFilePath = os.path.relpath(newTargetFilePath, newMDDirPath)
            newRelTargetFilePath = re.sub(r"\\", "/", newRelTargetFilePath)
            newRelTargetFilePath = urllib.parse.quote(newRelTargetFilePath)
            # Splice the new target into the contents and continue scanning after it
            newMDFileContents = newMDFileContents[0:m.start(1) + searchStartIndex] + newRelTargetFilePath + newMDFileContents[m.end(1) + searchStartIndex:]
            searchStartIndex = searchStartIndex + m.start(1) + len(newRelTargetFilePath)
    return newMDFileContents
def rewriteNotionZip(notionClient, zipPath, outputPath=".", removeTopH1=False, rewritePaths=True):
    """Prettify a whole Notion export .zip and return the path to the new zip.

    Renames entries via NotionExportRenamer, rewrites Markdown contents via
    mdFileRewrite, and stamps .md entries with Notion's last-edited time.
    """
    with tempfile.TemporaryDirectory() as tmpDir:
        # Unpack the whole thing first (probably faster than traversing it zipped, like with tar files)
        print(f"Extracting '{zipPath}' temporarily...")
        with zipfile.ZipFile(zipPath) as zf:
            zf.extractall(tmpDir)
        # Make new zip to begin filling
        zipName = os.path.basename(zipPath)
        newZipName = f"{zipName}.formatted"
        newZipPath = os.path.join(outputPath, newZipName)
        with zipfile.ZipFile(newZipPath, 'w', zipfile.ZIP_DEFLATED) as zf:
            #Traverse over the files, renaming, modifying, and rewriting back to the zip
            renamer = NotionExportRenamer(notionClient, tmpDir)
            for tmpWalkDir, dirs, files in os.walk(tmpDir):
                walkDir = os.path.relpath(tmpWalkDir, tmpDir)
                for name in files:
                    realPath = os.path.join(tmpWalkDir, name)
                    relPath = os.path.join("" if walkDir == "." else walkDir, name) # Prevent paths starting with .\\ which, when written to the tar, do annoying things
                    # Rewrite the current path and get the times from Notion
                    print("---")
                    print(f"Working on '{relPath}'")
                    newPath, createdTime, lastEditedTime = renamer.renamePathAndTimesWithNotion(relPath)
                    if os.path.splitext(name)[1] == ".md":
                        # Grab the data from the file if md file
                        with open(realPath, "r", encoding='utf-8') as f:
                            mdFileData = f.read()
                        mdFileData = mdFileRewrite(renamer, relPath, mdFileContents=mdFileData, removeTopH1=removeTopH1, rewritePaths=rewritePaths)
                        print(f"Writing as '{newPath}' with time '{lastEditedTime}'")
                        # Stamp the zip entry with Notion's last-edited time
                        zi = zipfile.ZipInfo(newPath, lastEditedTime.timetuple())
                        zf.writestr(zi, mdFileData)
                    else:
                        print(f"Writing as '{newPath}' with time from original export (not an .md file)")
                        zf.write(realPath, newPath)
    return newZipPath
def cli(argv):
    """
    CLI entrypoint, takes CLI arguments array (e.g. ``sys.argv[1:]``).
    """
    parser = argparse.ArgumentParser(description='Prettifies Notion .zip exports')
    parser.add_argument('token_v2', type=str,
                        help='the token for your Notion.so session')
    parser.add_argument('zip_path', type=str,
                        help='the path to the Notion exported .zip file')
    parser.add_argument('--output-path', action='store', type=str, default=".",
                        help='The path to output to, defaults to cwd')
    parser.add_argument('--remove-title', action='store_true',
                        help='Removes the title that Notion adds. H1s at the top of every file')
    # FIX: action='store_false' means passing this flag sets rewrite_paths to
    # False, i.e. it DISABLES rewriting; the old help text claimed the opposite.
    parser.add_argument('--rewrite-paths', action='store_false', default=True,
                        help='Disable rewriting the paths in the Markdown files to match the file renaming (rewriting is on by default)')
    args = parser.parse_args(argv)
    startTime = time.time()
    nCl = NotionClient(token_v2=args.token_v2)
    # Retry Notion block fetches with exponential backoff on transient HTTP errors
    nCl.get_block = backoff.on_exception(backoff.expo,
                                         requests.exceptions.HTTPError,
                                         max_tries=5,
                                         )(nCl.get_block)
    outFileName = rewriteNotionZip(nCl, args.zip_path, outputPath=args.output_path,
                                   removeTopH1=args.remove_title, rewritePaths=args.rewrite_paths)
    print("--- Finished in %s seconds ---" % (time.time() - startTime))
    print(f"Output file written as '{outFileName}'")
if __name__ == "__main__":
    cli(sys.argv[1:])
| true | true |
f7140a3806b353cd7b15d13284304cbfff67879e | 5,382 | py | Python | examples/old/zipline_alpaca2.py | sherrytp/TradingEvolved | 4bc9cc18244954bff37a80f67cce658bd0802b5d | [
"Apache-2.0"
] | null | null | null | examples/old/zipline_alpaca2.py | sherrytp/TradingEvolved | 4bc9cc18244954bff37a80f67cce658bd0802b5d | [
"Apache-2.0"
] | null | null | null | examples/old/zipline_alpaca2.py | sherrytp/TradingEvolved | 4bc9cc18244954bff37a80f67cce658bd0802b5d | [
"Apache-2.0"
] | 1 | 2022-03-26T07:11:18.000Z | 2022-03-26T07:11:18.000Z | # https://github.com/RomanMichaelPaolucci/AI_Stock_Trading/blob/master/IBM.csv
import abc
import threading
import time
import pandas as pd
import numpy as np
from keras.layers import Dense
from keras.models import Sequential, model_from_json
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from alpaca_trade_api import REST
class AlpacaPaperSocket(REST):
    """REST client preconfigured for the Alpaca paper-trading endpoint.

    SECURITY NOTE(review): the API key pair is hard-coded below; it should be
    moved to environment variables / a config file and the keys rotated.
    """
    def __init__(self):
        super().__init__(
            key_id='PKPO0ZH3XTVB336B7TEO',
            secret_key='gcs4U2Hp/ACI4A5UwYjYugrPqB2odD/m40Zuz5qw',
            base_url='https://paper-api.alpaca.markets'
        )
class TradingSystem(abc.ABC):
    """Abstract base for trading systems.

    Stores the broker connection and configuration, then immediately starts
    the subclass's ``system_loop`` on a background (non-daemon) thread, so
    construction alone keeps the process alive and running the loop.
    """
    def __init__(self, api, symbol, time_frame, system_id, system_label):
        # Save broker API handle and configuration fields
        self.api = api
        self.symbol = symbol
        self.time_frame = time_frame
        self.system_id = system_id
        self.system_label = system_label
        # Kick off the subclass's trading loop in the background
        thread = threading.Thread(target=self.system_loop)
        thread.start()
    @abc.abstractmethod
    def place_buy_order(self):
        pass
    @abc.abstractmethod
    def place_sell_order(self):
        pass
    @abc.abstractmethod
    def system_loop(self):
        pass
# Class to develop your AI portfolio manager
class PMModelDevelopment:
    """Train the 'Delta Close' classifier and persist it.

    Side effects: reads IBM.csv from the working directory, prints a
    classification report, and writes model.json and weights.h5.
    NOTE(review): X is the single 'Delta Close' column and y is everything
    else, which looks swapped relative to conventional X/y naming — confirm
    this matches the intended feature/label split.
    """
    def __init__(self):
        # Read your data in and split the dependent and independent
        data = pd.read_csv('IBM.csv')
        X = data['Delta Close']
        y = data.drop(['Delta Close'], axis=1)
        # Train test split
        X_train, X_test, y_train, y_test = train_test_split(X, y)
        # Create the sequential
        network = Sequential()
        # Create the structure of the neural network
        network.add(Dense(1, input_shape=(1,), activation='tanh'))
        network.add(Dense(3, activation='tanh'))
        network.add(Dense(3, activation='tanh'))
        network.add(Dense(3, activation='tanh'))
        network.add(Dense(1, activation='tanh'))
        # Compile the model
        network.compile(
            optimizer='rmsprop',
            loss='hinge',
            metrics=['accuracy']
        )
        # Train the model
        network.fit(X_train.values, y_train.values, epochs=100)
        # Evaluate the predictions of the model
        y_pred = network.predict(X_test.values)
        y_pred = np.around(y_pred, 0)
        print(classification_report(y_test, y_pred))
        # Save structure to json
        model = network.to_json()
        with open("model.json", "w") as json_file:
            json_file.write(model)
        # Save weights to HDF5
        network.save_weights("weights.h5")
# AI Portfolio Manager
class PortfolioManagementModel:
    """Load the saved network (model.json + weights.h5) and verify it.

    Side effects: reads IBM.csv, model.json and weights.h5 from the working
    directory and prints a classification report confirming the reloaded
    network still reproduces its predictions.
    """
    def __init__(self):
        # Data in to test that the saving of weights worked
        data = pd.read_csv('IBM.csv')
        X = data['Delta Close']
        y = data.drop(['Delta Close'], axis=1)
        # Read structure from json.
        # FIX: use a context manager so the handle is closed even if read() raises.
        with open('model.json', 'r') as json_file:
            json = json_file.read()
        self.network = model_from_json(json)
        # Read weights from HDF5
        self.network.load_weights("weights.h5")
        # Verify weights and structure are loaded
        y_pred = self.network.predict(X.values)
        y_pred = np.around(y_pred, 0)
        print(classification_report(y, y_pred))
# Smoke-test the reload path at import time
PortfolioManagementModel()
# in implementation create a vector to store data...
class PortfolioManagementSystem(TradingSystem):
    """Daily IBM trading system driven by the saved AI model.

    Inherits the background-thread startup from TradingSystem; system_loop
    polls end-of-day data once per day and trades on the weekly close delta.
    """
    def __init__(self):
        super().__init__(AlpacaPaperSocket(), 'IBM', 86400, 1, 'AI_PM')
        self.AI = PortfolioManagementModel()
    def place_buy_order(self):
        """Submit a market day-order to buy one share of IBM."""
        self.api.submit_order(
            symbol='IBM',
            qty=1,
            side='buy',
            type='market',
            time_in_force='day',
        )
    def place_sell_order(self):
        """Submit a market day-order to sell one share of IBM."""
        self.api.submit_order(
            symbol='IBM',
            qty=1,
            side='sell',
            type='market',
            time_in_force='day',
        )
    def system_loop(self):
        """Poll EoD data daily and let the model choose buy/sell/hold."""
        # Variables for weekly close
        this_weeks_close = 0
        last_weeks_close = 0
        delta = 0
        day_count = 0
        while(True):
            # Wait a day to request more data.
            # FIX: time.sleep takes seconds; 1440 was only 24 minutes.
            time.sleep(86400)
            # Request EoD data for IBM
            data_req = self.api.get_barset('IBM', timeframe='1D', limit=1).df
            # Construct dataframe to predict
            x = pd.DataFrame(
                data=[[
                    data_req['IBM']['close'][0]]], columns='Close'.split()
            )
            # FIX: day_count was never incremented, so the weekly
            # close/delta update below could never fire.
            day_count += 1
            if(day_count == 7):
                day_count = 0
                last_weeks_close = this_weeks_close
                this_weeks_close = x['Close']
                delta = this_weeks_close - last_weeks_close
            # AI choosing to buy, sell, or hold
            if np.around(self.AI.network.predict([delta])) <= -.5:
                self.place_sell_order()
            # FIX: the closing paren was misplaced, rounding the boolean
            # comparison instead of the model's prediction.
            elif np.around(self.AI.network.predict([delta])) >= .5:
                self.place_buy_order()
# Instantiate to start the system's background trading loop
PortfolioManagementSystem()
| 30.40678 | 78 | 0.579153 |
import abc
import threading
import time
import pandas as pd
import numpy as np
from keras.layers import Dense
from keras.models import Sequential, model_from_json
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from alpaca_trade_api import REST
class AlpacaPaperSocket(REST):
    """REST client preconfigured for the Alpaca paper-trading endpoint.

    SECURITY NOTE(review): the API key pair is hard-coded below; it should be
    moved to environment variables / a config file and the keys rotated.
    """
    def __init__(self):
        super().__init__(
            key_id='PKPO0ZH3XTVB336B7TEO',
            secret_key='gcs4U2Hp/ACI4A5UwYjYugrPqB2odD/m40Zuz5qw',
            base_url='https://paper-api.alpaca.markets'
        )
class TradingSystem(abc.ABC):
    """Abstract base for trading systems.

    Stores the broker connection and configuration, then immediately starts
    the subclass's ``system_loop`` on a background (non-daemon) thread.
    """
    def __init__(self, api, symbol, time_frame, system_id, system_label):
        # Save broker API handle and configuration fields
        self.api = api
        self.symbol = symbol
        self.time_frame = time_frame
        self.system_id = system_id
        self.system_label = system_label
        # Kick off the subclass's trading loop in the background
        thread = threading.Thread(target=self.system_loop)
        thread.start()
    @abc.abstractmethod
    def place_buy_order(self):
        pass
    @abc.abstractmethod
    def place_sell_order(self):
        pass
    @abc.abstractmethod
    def system_loop(self):
        pass
class PMModelDevelopment:
    """Train the 'Delta Close' classifier and persist it.

    Side effects: reads IBM.csv from the working directory, prints a
    classification report, and writes model.json and weights.h5.
    """
    def __init__(self):
        # Load data and split into model input vs. targets
        data = pd.read_csv('IBM.csv')
        X = data['Delta Close']
        y = data.drop(['Delta Close'], axis=1)
        X_train, X_test, y_train, y_test = train_test_split(X, y)
        # Small fully-connected tanh network
        network = Sequential()
        network.add(Dense(1, input_shape=(1,), activation='tanh'))
        network.add(Dense(3, activation='tanh'))
        network.add(Dense(3, activation='tanh'))
        network.add(Dense(3, activation='tanh'))
        network.add(Dense(1, activation='tanh'))
        network.compile(
            optimizer='rmsprop',
            loss='hinge',
            metrics=['accuracy']
        )
        network.fit(X_train.values, y_train.values, epochs=100)
        # Evaluate on the held-out split
        y_pred = network.predict(X_test.values)
        y_pred = np.around(y_pred, 0)
        print(classification_report(y_test, y_pred))
        # Persist structure (JSON) and weights (HDF5)
        model = network.to_json()
        with open("model.json", "w") as json_file:
            json_file.write(model)
        network.save_weights("weights.h5")
class PortfolioManagementModel:
    """Load the saved network (model.json + weights.h5) and verify it.

    Side effects: reads IBM.csv, model.json and weights.h5 from the working
    directory and prints a classification report confirming the reloaded
    network still reproduces its predictions.
    """
    def __init__(self):
        data = pd.read_csv('IBM.csv')
        X = data['Delta Close']
        y = data.drop(['Delta Close'], axis=1)
        # FIX: use a context manager so the handle is closed even if read() raises.
        with open('model.json', 'r') as json_file:
            json = json_file.read()
        self.network = model_from_json(json)
        self.network.load_weights("weights.h5")
        # Verify weights and structure are loaded
        y_pred = self.network.predict(X.values)
        y_pred = np.around(y_pred, 0)
        print(classification_report(y, y_pred))
# Smoke-test the reload path at import time
PortfolioManagementModel()
class PortfolioManagementSystem(TradingSystem):
    """Daily IBM trading system driven by the saved AI model.

    Inherits the background-thread startup from TradingSystem; system_loop
    polls end-of-day data once per day and trades on the weekly close delta.
    """
    def __init__(self):
        super().__init__(AlpacaPaperSocket(), 'IBM', 86400, 1, 'AI_PM')
        self.AI = PortfolioManagementModel()
    def place_buy_order(self):
        """Submit a market day-order to buy one share of IBM."""
        self.api.submit_order(
            symbol='IBM',
            qty=1,
            side='buy',
            type='market',
            time_in_force='day',
        )
    def place_sell_order(self):
        """Submit a market day-order to sell one share of IBM."""
        self.api.submit_order(
            symbol='IBM',
            qty=1,
            side='sell',
            type='market',
            time_in_force='day',
        )
    def system_loop(self):
        """Poll EoD data daily and let the model choose buy/sell/hold."""
        this_weeks_close = 0
        last_weeks_close = 0
        delta = 0
        day_count = 0
        while(True):
            # FIX: time.sleep takes seconds; 1440 was only 24 minutes,
            # not the intended one day.
            time.sleep(86400)
            data_req = self.api.get_barset('IBM', timeframe='1D', limit=1).df
            x = pd.DataFrame(
                data=[[
                    data_req['IBM']['close'][0]]], columns='Close'.split()
            )
            # FIX: day_count was never incremented, so the weekly
            # close/delta update below could never fire.
            day_count += 1
            if(day_count == 7):
                day_count = 0
                last_weeks_close = this_weeks_close
                this_weeks_close = x['Close']
                delta = this_weeks_close - last_weeks_close
            if np.around(self.AI.network.predict([delta])) <= -.5:
                self.place_sell_order()
            # FIX: the closing paren was misplaced, rounding the boolean
            # comparison instead of the model's prediction.
            elif np.around(self.AI.network.predict([delta])) >= .5:
                self.place_buy_order()
# Instantiate to start the system's background trading loop
PortfolioManagementSystem()
| true | true |
f7140adcd0356882246b5c8d05e68682363f163a | 5,186 | py | Python | src/calculator.py | u-aaa/simple_calculator | 562231083fca44ffc0825295179af14d30df17bb | [
"MIT"
] | null | null | null | src/calculator.py | u-aaa/simple_calculator | 562231083fca44ffc0825295179af14d30df17bb | [
"MIT"
] | null | null | null | src/calculator.py | u-aaa/simple_calculator | 562231083fca44ffc0825295179af14d30df17bb | [
"MIT"
] | 1 | 2021-08-17T18:59:53.000Z | 2021-08-17T18:59:53.000Z | class Calculator:
""""
This calculator performs the following basic mathematical operations:
* Addition
* Subtraction
* Division
* Multiplication
* nth root of number
* exponent
Attributes
----------
__value : (int or float)
the calculator memory value
Methods
--------
input_validation(new_value):
validates that the value entered is a number or float
add(new_value: int or float):
adds the new value to the value in the calculator memory
subtract(new_value: int or float):
subtracts the new value from the value in the calculator memory
multiply(new_value: int or float):
multiplies the new value with the value in the calculator memory
divide(new_value: int or float):
divides the value in the calculator memory with the new value
n_root(root: int or float):
takes the (n) root of the value in the calculator memory
exponent(exponent: int or float):
raises the values in the calculator memory to the power of the inputted value
reset_memory():
resets the calculator memory value to 0
memory_value():
returns the calculator memory value
"""
def __init__(self, value = 0) -> None:
"""
initializes the memory value
"""
self.__input_validation(value)
self.__value = value
def __input_validation(self, new_value: (int, float)) -> None:
"""
validates that the inputted value is an integer or float
"""
if not isinstance(new_value, (int,float)):
raise NotANumber(new_value)
def add(self, new_value: (int,float)) -> (int, float):
"""
adds the new value to the value in the calculator memory
"""
self.__input_validation(new_value)
self.__value += new_value
return self.__value
def subtract(self, new_value: (int, float)) -> (int, float):
"""
subtracts the new value from the value in the calculator memory
"""
self.__input_validation(new_value)
self.__value -= new_value
return self.__value
def multiply(self, new_value: (int, float)) -> (int, float):
"""
multiplies the new value with the value in the calculator memory
"""
self.__input_validation(new_value)
self.__value *= new_value
return self.__value
def divide(self, new_value: (int, float)) -> (int, float):
"""
divides the value in the calculator memory with the new value
"""
self.__input_validation(new_value)
self.__value /= new_value
return self.__value
#except (ZeroDivisionError) as err:
#print(f'Cannot divide by zero -> {err}')
def n_root(self, root: (int, float)) -> (int, float):
"""
takes the (n) root of the value in the calculator memory
"""
self.__input_validation(root)
if root <= 0:
raise NotAPositiveNumber(root, type= 'Inputted value')
elif self.__value <= 0:
raise NotAPositiveNumber(self.__value, type= 'Memory value')
else:
self.__value = self.__value ** (1/root)
return self.__value
def exponent(self, exponent: (int, float)) -> (int, float):
"""
raises the values in the calculator memory to the power of the inputted value
"""
self.__input_validation(exponent)
self.__value = self.__value ** exponent
return self.__value
def reset_memory(self) -> (int, float):
"""
resets the calculator memory value to 0
"""
self.__value = 0
return self.__value
def memory_value(self) -> (int, float):
return self.__value
class NotANumber(Exception):
"""
Raises an error stating the input is not a number
Methods
--------
__init__(value, message):
initializes the error class
__str__():
returns the inputted value and error message
"""
def __init__(self, value, message = 'is not a number'):
"""
initializes the error class
"""
self.__message = message
self.__value = value
super().__init__(self.__message)
def __str__(self):
"""
returns the inputted value and error message
"""
return f'"{self.__value}" {self.__message}'
class NotAPositiveNumber(Exception):
"""
Raises an error stating the input is not a positive number
Methods
--------
__init__(value, message):
initializes the error class
__str__():
returns the inputted value and error message
"""
def __init__(self, value, message = 'is not a positive number.The function only accepts positive numbers', type = ''):
"""
initializes the error class
"""
self.__message = message
self.__value = value
self.__type = type
super().__init__(self.__message)
def __str__(self):
""" returns the value and error message"""
return f'{self.__type} ({self.__value}) {self.__message}'
| 29.299435 | 122 | 0.604898 | class Calculator:
def __init__(self, value = 0) -> None:
self.__input_validation(value)
self.__value = value
def __input_validation(self, new_value: (int, float)) -> None:
if not isinstance(new_value, (int,float)):
raise NotANumber(new_value)
def add(self, new_value: (int,float)) -> (int, float):
self.__input_validation(new_value)
self.__value += new_value
return self.__value
def subtract(self, new_value: (int, float)) -> (int, float):
self.__input_validation(new_value)
self.__value -= new_value
return self.__value
def multiply(self, new_value: (int, float)) -> (int, float):
self.__input_validation(new_value)
self.__value *= new_value
return self.__value
def divide(self, new_value: (int, float)) -> (int, float):
self.__input_validation(new_value)
self.__value /= new_value
return self.__value
def n_root(self, root: (int, float)) -> (int, float):
self.__input_validation(root)
if root <= 0:
raise NotAPositiveNumber(root, type= 'Inputted value')
elif self.__value <= 0:
raise NotAPositiveNumber(self.__value, type= 'Memory value')
else:
self.__value = self.__value ** (1/root)
return self.__value
def exponent(self, exponent: (int, float)) -> (int, float):
self.__input_validation(exponent)
self.__value = self.__value ** exponent
return self.__value
def reset_memory(self) -> (int, float):
self.__value = 0
return self.__value
def memory_value(self) -> (int, float):
return self.__value
class NotANumber(Exception):
def __init__(self, value, message = 'is not a number'):
self.__message = message
self.__value = value
super().__init__(self.__message)
def __str__(self):
return f'"{self.__value}" {self.__message}'
class NotAPositiveNumber(Exception):
def __init__(self, value, message = 'is not a positive number.The function only accepts positive numbers', type = ''):
self.__message = message
self.__value = value
self.__type = type
super().__init__(self.__message)
def __str__(self):
return f'{self.__type} ({self.__value}) {self.__message}'
| true | true |
f7140bdb4009f76fa9ca6d9f7fe0d41411f83b25 | 3,237 | py | Python | homeassistant/components/harmony/__init__.py | unverbraucht/core | 312af53935a1bffd58b3b35e82e31292a6ec22ad | [
"Apache-2.0"
] | 2 | 2019-11-20T20:56:59.000Z | 2021-01-03T08:52:18.000Z | homeassistant/components/harmony/__init__.py | shownor/core | b50281a9173e7fb4a37b3f813ca92876088eaac3 | [
"Apache-2.0"
] | 5 | 2020-04-26T10:50:01.000Z | 2021-03-16T21:19:46.000Z | homeassistant/components/harmony/__init__.py | winterscar/core | 5a55d508791aae65f16396691d014c73fb2095f0 | [
"Apache-2.0"
] | 1 | 2021-04-18T19:36:34.000Z | 2021-04-18T19:36:34.000Z | """The Logitech Harmony Hub integration."""
import asyncio
import logging
from homeassistant.components.remote import ATTR_ACTIVITY, ATTR_DELAY_SECS
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import DOMAIN, HARMONY_OPTIONS_UPDATE, PLATFORMS
from .remote import HarmonyRemote
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
    """Prepare shared per-domain storage for the Logitech Harmony Hub integration."""
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {}
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Logitech Harmony Hub from a config entry."""
    # As there currently is no way to import options from yaml
    # when setting up a config entry, we fallback to adding
    # the options to the config entry and pull them out here if
    # they are missing from the options
    _async_import_options_from_data_if_missing(hass, entry)
    address = entry.data[CONF_HOST]
    name = entry.data[CONF_NAME]
    activity = entry.options.get(ATTR_ACTIVITY)
    delay_secs = entry.options.get(ATTR_DELAY_SECS)
    harmony_conf_file = hass.config.path(f"harmony_{entry.unique_id}.conf")
    try:
        device = HarmonyRemote(
            name, entry.unique_id, address, activity, harmony_conf_file, delay_secs
        )
        connected_ok = await device.connect()
    except (asyncio.TimeoutError, ValueError, AttributeError) as err:
        # FIX: chain the original error so the root cause shows up in logs
        raise ConfigEntryNotReady from err
    if not connected_ok:
        raise ConfigEntryNotReady
    hass.data[DOMAIN][entry.entry_id] = device
    # Re-dispatch option changes to the platforms (see _update_listener)
    entry.add_update_listener(_update_listener)
    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    return True
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
    """Copy legacy options stored in entry.data into entry.options."""
    options = dict(entry.options)
    # FIX: use a real boolean instead of 0/1 for the dirty flag
    modified = False
    for importable_option in [ATTR_ACTIVITY, ATTR_DELAY_SECS]:
        if importable_option not in entry.options and importable_option in entry.data:
            options[importable_option] = entry.data[importable_option]
            modified = True
    if modified:
        hass.config_entries.async_update_entry(entry, options=options)
async def _update_listener(hass: HomeAssistant, entry: ConfigEntry):
    """Handle options update by broadcasting the new options on the per-hub dispatcher signal."""
    async_dispatcher_send(
        hass, f"{HARMONY_OPTIONS_UPDATE}-{entry.unique_id}", entry.options
    )
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    # Unload every platform this entry set up; all() is True only if every
    # platform unloaded successfully
    unload_ok = all(
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_unload(entry, component)
                for component in PLATFORMS
            ]
        )
    )
    # Shutdown a harmony remote for removal (done even when a platform
    # failed to unload; only the bookkeeping below is conditional)
    device = hass.data[DOMAIN][entry.entry_id]
    await device.shutdown()
    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
| 32.049505 | 88 | 0.724745 | import asyncio
import logging
from homeassistant.components.remote import ATTR_ACTIVITY, ATTR_DELAY_SECS
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import DOMAIN, HARMONY_OPTIONS_UPDATE, PLATFORMS
from .remote import HarmonyRemote
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Logitech Harmony Hub component."""
    hass.data.setdefault(DOMAIN, {})
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Logitech Harmony Hub from a config entry."""
    # Pull any options still stored in entry.data into entry.options
    _async_import_options_from_data_if_missing(hass, entry)
    address = entry.data[CONF_HOST]
    name = entry.data[CONF_NAME]
    activity = entry.options.get(ATTR_ACTIVITY)
    delay_secs = entry.options.get(ATTR_DELAY_SECS)
    harmony_conf_file = hass.config.path(f"harmony_{entry.unique_id}.conf")
    try:
        device = HarmonyRemote(
            name, entry.unique_id, address, activity, harmony_conf_file, delay_secs
        )
        connected_ok = await device.connect()
    except (asyncio.TimeoutError, ValueError, AttributeError):
        raise ConfigEntryNotReady
    if not connected_ok:
        raise ConfigEntryNotReady
    hass.data[DOMAIN][entry.entry_id] = device
    # Re-dispatch option changes to the platforms (see _update_listener)
    entry.add_update_listener(_update_listener)
    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    return True
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
    """Migrate legacy options stored in ``entry.data`` into ``entry.options``.

    Older config entries kept activity/delay settings in the data mapping;
    copy any key that is missing from the options, and persist the entry
    only when something actually changed.
    """
    options = dict(entry.options)
    modified = False  # bool flag instead of the old 0/1 int sentinel
    for importable_option in (ATTR_ACTIVITY, ATTR_DELAY_SECS):
        if importable_option not in entry.options and importable_option in entry.data:
            options[importable_option] = entry.data[importable_option]
            modified = True

    if modified:
        hass.config_entries.async_update_entry(entry, options=options)
async def _update_listener(hass: HomeAssistant, entry: ConfigEntry):
    """Broadcast updated entry options to the remote via the dispatcher."""
    signal = f"{HARMONY_OPTIONS_UPDATE}-{entry.unique_id}"
    async_dispatcher_send(hass, signal, entry.options)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry and shut the Harmony remote down."""
    results = await asyncio.gather(
        *(
            hass.config_entries.async_forward_entry_unload(entry, platform)
            for platform in PLATFORMS
        )
    )
    unload_ok = all(results)

    # Always close the hub connection, even if a platform failed to unload,
    # so the background websocket is not leaked.
    device = hass.data[DOMAIN][entry.entry_id]
    await device.shutdown()

    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
| true | true |
f7140c4bcf3648504d65dea8b56a9087ccad5b5d | 16,838 | py | Python | src/utils.py | amelieEmily/RobustDARTS | b26e127c6e9c330258786f5eb77b17d367f546ff | [
"Apache-2.0"
] | null | null | null | src/utils.py | amelieEmily/RobustDARTS | b26e127c6e9c330258786f5eb77b17d367f546ff | [
"Apache-2.0"
] | null | null | null | src/utils.py | amelieEmily/RobustDARTS | b26e127c6e9c330258786f5eb77b17d367f546ff | [
"Apache-2.0"
] | null | null | null | import os
import yaml
import numpy as np
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
from collections import namedtuple
class MyDumper(yaml.Dumper):
    """YAML dumper that always indents block sequences under their key."""

    def increase_indent(self, flow=False, indentless=False):
        # Force indentless=False so list items are visually nested.
        return super().increase_indent(flow, False)
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
PRIMITIVES = [
'none',
'noise',
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
class EVLocalAvg(object):
    """Sliding-window local average of the dominant Hessian eigenvalue,
    used by RobustDARTS to decide when to early-stop the architecture
    search. The window is centred, so each local average lags the current
    epoch by ``ev_freq * floor(window/2)`` epochs."""

    def __init__(self, window=5, ev_freq=2, total_epochs=50):
        """ Keep track of the eigenvalues local average.
        Args:
            window (int): number of elements used to compute local average.
              Default: 5
            ev_freq (int): frequency used to compute eigenvalues. Default:
              every 2 epochs
            total_epochs (int): total number of epochs that DARTS runs.
              Default: 50
        """
        self.window = window
        self.ev_freq = ev_freq
        self.epochs = total_epochs
        self.stop_search = False
        # stop_epoch/stop_genotype default to the final epoch's values in
        # case early stopping never triggers.
        self.stop_epoch = total_epochs - 1
        self.stop_genotype = None
        self.ev = []
        self.ev_local_avg = []
        self.genotypes = {}
        self.la_epochs = {}
        # start and end index of the local average window
        self.la_start_idx = 0
        self.la_end_idx = self.window

    def reset(self):
        # NOTE(review): does not reset la_start_idx/la_end_idx or the stop_*
        # fields — confirm callers only reset between full runs.
        self.ev = []
        self.ev_local_avg = []
        self.genotypes = {}
        self.la_epochs = {}

    def update(self, epoch, ev, genotype):
        """ Method to update the local average list.
        Args:
            epoch (int): current epoch
            ev (float): current dominant eigenvalue
            genotype (namedtuple): current genotype
        """
        self.ev.append(ev)
        self.genotypes.update({epoch: genotype})
        # set the stop_genotype to the current genotype in case the early stop
        # procedure decides not to early stop
        self.stop_genotype = genotype

        # since the local average computation starts after the dominant
        # eigenvalue in the first epoch is already computed we have to wait
        # at least until we have 3 eigenvalues in the list.
        if (len(self.ev) >= int(np.ceil(self.window/2))) and (epoch <
                                                              self.epochs - 1):
            # start sliding the window as soon as the number of eigenvalues in
            # the list becomes equal to the window size
            if len(self.ev) < self.window:
                self.ev_local_avg.append(np.mean(self.ev))
            else:
                assert len(self.ev[self.la_start_idx: self.la_end_idx]) == self.window
                self.ev_local_avg.append(np.mean(self.ev[self.la_start_idx:
                                                         self.la_end_idx]))
                self.la_start_idx += 1
                self.la_end_idx += 1

            # keep track of the offset between the current epoch and the epoch
            # corresponding to the local average. NOTE: in the end the size of
            # self.ev and self.ev_local_avg should be equal
            self.la_epochs.update({epoch: int(epoch -
                                              int(self.ev_freq*np.floor(self.window/2)))})

        # -1 marks epochs too early to have a local average yet
        elif len(self.ev) < int(np.ceil(self.window/2)):
            self.la_epochs.update({epoch: -1})

        # since there is an offset between the current epoch and the local
        # average epoch, loop in the last epoch to compute the local average of
        # these number of elements: window, window - 1, window - 2, ..., ceil(window/2)
        elif epoch == self.epochs - 1:
            for i in range(int(np.ceil(self.window/2))):
                assert len(self.ev[self.la_start_idx: self.la_end_idx]) == self.window - i
                self.ev_local_avg.append(np.mean(self.ev[self.la_start_idx:
                                                         self.la_end_idx + 1]))
                self.la_start_idx += 1

    def early_stop(self, epoch, factor=1.3, es_start_epoch=10, delta=4):
        """ Early stopping criterion
        Args:
            epoch (int): current epoch
            factor (float): threshold factor for the ration between the current
                and prefious eigenvalue. Default: 1.3
            es_start_epoch (int): until this epoch do not consider early
                stopping. Default: 20
            delta (int): factor influencing which previous local average we
                consider for early stopping. Default: 2
        """
        if int(self.la_epochs[epoch] - self.ev_freq*delta) >= es_start_epoch:
            # the current local average corresponds to
            # epoch - int(self.ev_freq*np.floor(self.window/2))
            current_la = self.ev_local_avg[-1]
            # by default take the local average corresponding to epoch
            # delta*self.ev_freq
            previous_la = self.ev_local_avg[-1 - delta]
            # stop when the eigenvalue grew by more than `factor` over the
            # lookback interval
            self.stop_search = current_la / previous_la > factor
            if self.stop_search:
                self.stop_epoch = int(self.la_epochs[epoch] - self.ev_freq*delta)
                self.stop_genotype = self.genotypes[self.stop_epoch]
class AvgrageMeter(object):
    """Tracks a running sum, count and mean of scalar observations."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Discard all accumulated statistics."""
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        """Fold in *n* observations whose mean is *val*."""
        self.sum = self.sum + val * n
        self.cnt = self.cnt + n
        self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
    """Compute precision@k for each k in *topk*.

    Returns a list of scalar tensors, one per k, each holding the
    percentage of samples whose true label is among the top-k logits.
    """
    k_max = max(topk)
    n_samples = target.size(0)

    # (batch, k_max) indices of the largest logits, transposed to
    # (k_max, batch) so row i holds everyone's i-th best guess.
    top_idx = output.topk(k_max, 1, True, True)[1].t()
    hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))

    scores = []
    for k in topk:
        n_hit = hits[:k].view(-1).float().sum(0)
        scores.append(n_hit.mul_(100.0 / n_samples))
    return scores
def write_yaml_results_eval(args, results_file, result_to_log):
    """Append an evaluation result to ``<args._save>/<results_file>.yaml``.

    Results nest as setting -> regularization -> search_task_id -> [results].
    When the file is missing or unparsable (yaml.load returns None for an
    empty file), a fresh structure is written instead.
    """
    setting = '_'.join([args.space, args.dataset])
    regularization = '_'.join([str(args.search_dp), str(args.search_wd)])
    results_file = os.path.join(args._save, results_file + '.yaml')

    def _dump(payload):
        # Rewrite the whole file; MyDumper keeps nested sequences indented.
        with open(results_file, 'w') as handle:
            yaml.dump(payload, handle, Dumper=MyDumper, default_flow_style=False)

    try:
        with open(results_file, 'r') as handle:
            result = yaml.load(handle, Loader=yaml.Loader)
        # setdefault creates each missing level in a single pass.
        bucket = result.setdefault(setting, {}).setdefault(regularization, {})
        bucket.setdefault(args.search_task_id, []).append(result_to_log)
        _dump(result)
    except (AttributeError, FileNotFoundError):
        # No usable file yet: start from scratch with just this result.
        _dump({setting: {regularization: {args.search_task_id: [result_to_log]}}})
def write_yaml_results(args, results_file, result_to_log):
    """Record a search result in ``<args._save>/<results_file>.yaml``.

    Results nest as setting -> regularization -> task_id -> result; an
    existing value for the same task_id is overwritten. A fresh structure
    is written when the file is missing or unparsable.
    """
    setting = '_'.join([args.space, args.dataset])
    regularization = '_'.join([str(args.drop_path_prob), str(args.weight_decay)])
    results_file = os.path.join(args._save, results_file + '.yaml')

    def _dump(payload):
        with open(results_file, 'w') as handle:
            yaml.dump(payload, handle, Dumper=MyDumper, default_flow_style=False)

    try:
        with open(results_file, 'r') as handle:
            result = yaml.load(handle, Loader=yaml.Loader)
        # Walk/create the nested levels, then store under the task id.
        result.setdefault(setting, {}).setdefault(regularization, {})[args.task_id] = result_to_log
        _dump(result)
    except (AttributeError, FileNotFoundError):
        # Missing or empty file (yaml.load -> None): write from scratch.
        _dump({setting: {regularization: {args.task_id: result_to_log}}})
class Cutout(object):
    """Randomly zero a square patch of a CHW image tensor (in place).

    With probability *prob*, a square of side *length* centred at a random
    pixel (clipped at the borders) is set to zero on every channel.
    """

    def __init__(self, length, prob=1.0):
        self.length = length
        self.prob = prob

    def __call__(self, img):
        # Apply the cutout with probability self.prob.
        if not np.random.binomial(1, self.prob):
            return img

        h, w = img.size(1), img.size(2)
        cy, cx = np.random.randint(h), np.random.randint(w)
        half = self.length // 2

        # Clip the square so it stays inside the image.
        y1, y2 = np.clip(cy - half, 0, h), np.clip(cy + half, 0, h)
        x1, x2 = np.clip(cx - half, 0, w), np.clip(cx + half, 0, w)

        mask = np.ones((h, w), np.float32)
        mask[y1: y2, x1: x2] = 0.
        img *= torch.from_numpy(mask).expand_as(img)
        return img
def _data_transforms_svhn(args):
    """Build the (train, valid) torchvision pipelines for SVHN."""
    mean = [0.4377, 0.4438, 0.4728]
    std = [0.1980, 0.2010, 0.1970]

    train_ops = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ]
    train_transform = transforms.Compose(train_ops)
    if args.cutout:
        # Cutout operates on tensors, so it must come after ToTensor.
        train_transform.transforms.append(Cutout(args.cutout_length,
                                                 args.cutout_prob))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    return train_transform, valid_transform
def _data_transforms_dr_detection(args):
    """Build (train, valid) pipelines for the diabetic-retinopathy dataset.

    Evaluation runs use full-size crops (540 -> 512); search runs use the
    cheaper 256 -> 224 resolution. Both apply heavy photometric and
    geometric augmentation on the training split only.
    """
    DR_DETECTION_MEAN = [0.42, 0.22, 0.075]
    DR_DETECTION_STD = [0.27, 0.15, 0.081]
    if args.is_eval:
        # High-resolution pipeline for final evaluation/training.
        train_transform = transforms.Compose([
            transforms.Resize(540),
            transforms.RandomRotation((-45.0, +45.0)),
            transforms.RandomResizedCrop(512, scale=(0.9, 1.1), ratio=(0.9, 1.1)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ColorJitter(brightness=0.1, contrast=[0.75,1.5],
                                   saturation=[0.75,1.5], hue=0.15),
            transforms.ToTensor(),
            transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD)
        ])
        if args.cutout:
            # Cutout works on tensors, so append after ToTensor.
            train_transform.transforms.append(Cutout(args.cutout_length,
                                                     args.cutout_prob))

        valid_transform = transforms.Compose([
            transforms.Resize(540),
            transforms.CenterCrop(512),
            transforms.ToTensor(),
            transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD),
        ])
    else:
        # Lower-resolution pipeline used during architecture search.
        train_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomRotation((-45.0, +45.0)),
            transforms.RandomResizedCrop(224, scale=(0.9, 1.1), ratio=(0.9, 1.1)),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ColorJitter(brightness=0.1, contrast=[0.75, 1.5],
                                   saturation=[0.75, 1.5], hue=0.15),
            transforms.ToTensor(),
            transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD),
        ])
        if args.cutout:
            train_transform.transforms.append(Cutout(args.cutout_length,
                                                     args.cutout_prob))

        valid_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD),
        ])
    return train_transform, valid_transform
def _data_transforms_malaria(args):
    """Build the (train, valid) pipelines for the malaria cell dataset.

    Both splits share the same resize/crop/flip chain; optional Cutout is
    appended to the training pipeline only. No normalization is applied.
    """
    def _base_ops():
        # Fresh transform instances per pipeline.
        return [
            transforms.Resize(100),
            transforms.RandomCrop(64),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ToTensor(),
        ]

    train_transform = transforms.Compose(_base_ops())
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length,
                                                 args.cutout_prob))

    valid_transform = transforms.Compose(_base_ops())
    return train_transform, valid_transform
def _data_transforms_mnist(args):
    """Build the (train, valid) pipelines for MNIST (0.5/0.5 normalization)."""
    mean = [0.5, 0.5, 0.5]
    std = [0.5, 0.5, 0.5]

    augment = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ]
    train_transform = transforms.Compose(augment)
    if args.cutout:
        # Appended after ToTensor because Cutout expects a tensor.
        train_transform.transforms.append(Cutout(args.cutout_length,
                                                 args.cutout_prob))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    return train_transform, valid_transform
def _data_transforms_cifar100(args):
    """Build the (train, valid) pipelines for CIFAR-100."""
    mean = [0.5071, 0.4865, 0.4409]
    std = [0.2673, 0.2564, 0.2762]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    if args.cutout:
        # Tensor-level augmentation goes after ToTensor.
        train_transform.transforms.append(Cutout(args.cutout_length,
                                                 args.cutout_prob))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    return train_transform, valid_transform
def _data_transforms_cifar10(args):
    """Build the (train, valid) pipelines for CIFAR-10."""
    mean = [0.49139968, 0.48215827, 0.44653124]
    std = [0.24703233, 0.24348505, 0.26158768]

    train_ops = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ]
    train_transform = transforms.Compose(train_ops)
    if args.cutout:
        # Cutout consumes tensors, hence after ToTensor.
        train_transform.transforms.append(Cutout(args.cutout_length,
                                                 args.cutout_prob))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    return train_transform, valid_transform
def count_parameters_in_MB(model):
    """Return the model's parameter count in millions.

    ("MB" here means mega-parameters, not megabytes.)
    """
    # Use the builtin sum: calling np.sum on a generator is deprecated in
    # NumPy and silently falls back to Python-level iteration anyway.
    return sum(np.prod(v.size()) for v in model.parameters()) / 1e6
def save(model, model_path):
    """Serialize the model's parameters (state dict) to *model_path*."""
    state = model.state_dict()
    torch.save(state, model_path)
def load(model, model_path):
    """Restore parameters previously written by :func:`save` into *model*."""
    state = torch.load(model_path)
    model.load_state_dict(state)
def save_checkpoint(state, is_best, save, epoch, task_id):
    """Write *state* to ``checkpoint_<task_id>_<epoch>.pth.tar`` in *save*.

    When *is_best* is true, the checkpoint is also copied to
    ``model_best.pth.tar``.
    """
    ckpt_path = os.path.join(
        save, "checkpoint_{}_{}.pth.tar".format(task_id, epoch)
    )
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, os.path.join(save, 'model_best.pth.tar'))
def load_checkpoint(model, optimizer, scheduler, architect, save, la_tracker,
                    epoch, task_id):
    """Restore a full search state written by save_checkpoint.

    Loads model weights, the architecture parameters (alphas), both
    optimizers and the eigenvalue tracker's bookkeeping, then returns the
    stored learning rate.

    NOTE(review): *scheduler* is accepted but never restored here — confirm
    that is intentional.
    """
    filename = "checkpoint_{}_{}.pth.tar".format(task_id, epoch)
    filename = os.path.join(save, filename)
    checkpoint = torch.load(filename)
    model.load_state_dict(checkpoint['state_dict'])
    # Architecture parameters are stored separately from the state dict.
    model.alphas_normal.data = checkpoint['alphas_normal']
    model.alphas_reduce.data = checkpoint['alphas_reduce']
    optimizer.load_state_dict(checkpoint['optimizer'])
    architect.optimizer.load_state_dict(checkpoint['arch_optimizer'])
    # Restore the EVLocalAvg tracker's sliding-window state.
    la_tracker.ev = checkpoint['ev']
    la_tracker.ev_local_avg = checkpoint['ev_local_avg']
    la_tracker.genotypes = checkpoint['genotypes']
    la_tracker.la_epochs = checkpoint['la_epochs']
    la_tracker.la_start_idx = checkpoint['la_start_idx']
    la_tracker.la_end_idx = checkpoint['la_end_idx']
    lr = checkpoint['lr']
    return lr
def drop_path(x, drop_prob):
    """Apply per-sample stochastic depth to *x* in place.

    Each sample in the batch is zeroed with probability *drop_prob* and the
    survivors are rescaled by 1/keep_prob (inverted-dropout style). A
    non-positive *drop_prob* leaves *x* untouched. NOTE: the masked path
    allocates the mask with ``torch.cuda.FloatTensor`` and so requires CUDA.
    """
    if drop_prob <= 0.:
        return x
    keep_prob = 1. - drop_prob
    # One Bernoulli decision per sample, broadcast over C/H/W.
    mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))
    x.div_(keep_prob)
    x.mul_(mask)
    return x
def create_exp_dir(path, scripts_to_save=None):
    """Create the experiment directory, optionally snapshotting scripts.

    When *scripts_to_save* is given, each file is copied into a fresh
    ``scripts`` subdirectory for reproducibility.
    """
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
    print('Experiment dir : {}'.format(path))

    if scripts_to_save is not None:
        scripts_dir = os.path.join(path, 'scripts')
        os.mkdir(scripts_dir)
        for src in scripts_to_save:
            shutil.copyfile(src, os.path.join(scripts_dir, os.path.basename(src)))
def print_args(args):
    """Pretty-print an argparse namespace as dot-padded key/value lines."""
    for key, value in args.__dict__.items():
        # Pad with dots so values line up in a 50-character column.
        padding = '.' * (50 - len(key) - len(str(value)))
        print(key + padding + str(value))
    print()
| 34.646091 | 90 | 0.62644 | import os
import yaml
import numpy as np
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
from collections import namedtuple
class MyDumper(yaml.Dumper):
def increase_indent(self, flow=False, indentless=False):
return super(MyDumper, self).increase_indent(flow, False)
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
PRIMITIVES = [
'none',
'noise',
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
class EVLocalAvg(object):
def __init__(self, window=5, ev_freq=2, total_epochs=50):
self.window = window
self.ev_freq = ev_freq
self.epochs = total_epochs
self.stop_search = False
self.stop_epoch = total_epochs - 1
self.stop_genotype = None
self.ev = []
self.ev_local_avg = []
self.genotypes = {}
self.la_epochs = {}
self.la_start_idx = 0
self.la_end_idx = self.window
def reset(self):
self.ev = []
self.ev_local_avg = []
self.genotypes = {}
self.la_epochs = {}
def update(self, epoch, ev, genotype):
self.ev.append(ev)
self.genotypes.update({epoch: genotype})
self.stop_genotype = genotype
if (len(self.ev) >= int(np.ceil(self.window/2))) and (epoch <
self.epochs - 1):
if len(self.ev) < self.window:
self.ev_local_avg.append(np.mean(self.ev))
else:
assert len(self.ev[self.la_start_idx: self.la_end_idx]) == self.window
self.ev_local_avg.append(np.mean(self.ev[self.la_start_idx:
self.la_end_idx]))
self.la_start_idx += 1
self.la_end_idx += 1
self.la_epochs.update({epoch: int(epoch -
int(self.ev_freq*np.floor(self.window/2)))})
elif len(self.ev) < int(np.ceil(self.window/2)):
self.la_epochs.update({epoch: -1})
elif epoch == self.epochs - 1:
for i in range(int(np.ceil(self.window/2))):
assert len(self.ev[self.la_start_idx: self.la_end_idx]) == self.window - i
self.ev_local_avg.append(np.mean(self.ev[self.la_start_idx:
self.la_end_idx + 1]))
self.la_start_idx += 1
def early_stop(self, epoch, factor=1.3, es_start_epoch=10, delta=4):
if int(self.la_epochs[epoch] - self.ev_freq*delta) >= es_start_epoch:
current_la = self.ev_local_avg[-1]
previous_la = self.ev_local_avg[-1 - delta]
self.stop_search = current_la / previous_la > factor
if self.stop_search:
self.stop_epoch = int(self.la_epochs[epoch] - self.ev_freq*delta)
self.stop_genotype = self.genotypes[self.stop_epoch]
class AvgrageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0/batch_size))
return res
def write_yaml_results_eval(args, results_file, result_to_log):
setting = '_'.join([args.space, args.dataset])
regularization = '_'.join(
[str(args.search_dp), str(args.search_wd)]
)
results_file = os.path.join(args._save, results_file+'.yaml')
try:
with open(results_file, 'r') as f:
result = yaml.load(f, Loader=yaml.Loader)
if setting in result.keys():
if regularization in result[setting].keys():
if args.search_task_id in result[setting][regularization]:
result[setting][regularization][args.search_task_id].append(result_to_log)
else:
result[setting][regularization].update({args.search_task_id:
[result_to_log]})
else:
result[setting].update({regularization: {args.search_task_id:
[result_to_log]}})
else:
result.update({setting: {regularization: {args.search_task_id:
[result_to_log]}}})
with open(results_file, 'w') as f:
yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)
except (AttributeError, FileNotFoundError) as e:
result = {
setting: {
regularization: {
args.search_task_id: [result_to_log]
}
}
}
with open(results_file, 'w') as f:
yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)
def write_yaml_results(args, results_file, result_to_log):
setting = '_'.join([args.space, args.dataset])
regularization = '_'.join(
[str(args.drop_path_prob), str(args.weight_decay)]
)
results_file = os.path.join(args._save, results_file+'.yaml')
try:
with open(results_file, 'r') as f:
result = yaml.load(f, Loader=yaml.Loader)
if setting in result.keys():
if regularization in result[setting].keys():
result[setting][regularization].update({args.task_id: result_to_log})
else:
result[setting].update({regularization: {args.task_id: result_to_log}})
else:
result.update({setting: {regularization: {args.task_id: result_to_log}}})
with open(results_file, 'w') as f:
yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)
except (AttributeError, FileNotFoundError) as e:
result = {
setting: {
regularization: {
args.task_id: result_to_log
}
}
}
with open(results_file, 'w') as f:
yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)
class Cutout(object):
def __init__(self, length, prob=1.0):
self.length = length
self.prob = prob
def __call__(self, img):
if np.random.binomial(1, self.prob):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
def _data_transforms_svhn(args):
SVHN_MEAN = [0.4377, 0.4438, 0.4728]
SVHN_STD = [0.1980, 0.2010, 0.1970]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(SVHN_MEAN, SVHN_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(SVHN_MEAN, SVHN_STD),
])
return train_transform, valid_transform
def _data_transforms_dr_detection(args):
DR_DETECTION_MEAN = [0.42, 0.22, 0.075]
DR_DETECTION_STD = [0.27, 0.15, 0.081]
if args.is_eval:
train_transform = transforms.Compose([
transforms.Resize(540),
transforms.RandomRotation((-45.0, +45.0)),
transforms.RandomResizedCrop(512, scale=(0.9, 1.1), ratio=(0.9, 1.1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ColorJitter(brightness=0.1, contrast=[0.75,1.5],
saturation=[0.75,1.5], hue=0.15),
transforms.ToTensor(),
transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD)
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.Resize(540),
transforms.CenterCrop(512),
transforms.ToTensor(),
transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD),
])
else:
train_transform = transforms.Compose([
transforms.Resize(256),
transforms.RandomRotation((-45.0, +45.0)),
transforms.RandomResizedCrop(224, scale=(0.9, 1.1), ratio=(0.9, 1.1)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ColorJitter(brightness=0.1, contrast=[0.75, 1.5],
saturation=[0.75, 1.5], hue=0.15),
transforms.ToTensor(),
transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=DR_DETECTION_MEAN, std=DR_DETECTION_STD),
])
return train_transform, valid_transform
def _data_transforms_malaria(args):
train_transform = transforms.Compose([
transforms.Resize(100),
transforms.RandomCrop(64),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.Resize(100),
transforms.RandomCrop(64),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
])
return train_transform, valid_transform
def _data_transforms_mnist(args):
MNIST_MEAN = [0.5, 0.5, 0.5]
MNIST_STD = [0.5, 0.5, 0.5]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(MNIST_MEAN, MNIST_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(MNIST_MEAN, MNIST_STD),
])
return train_transform, valid_transform
def _data_transforms_cifar100(args):
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.2673, 0.2564, 0.2762]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
return train_transform, valid_transform
def _data_transforms_cifar10(args):
CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
return train_transform, valid_transform
def count_parameters_in_MB(model):
return np.sum(np.prod(v.size()) for v in model.parameters())/1e6
def save(model, model_path):
torch.save(model.state_dict(), model_path)
def load(model, model_path):
model.load_state_dict(torch.load(model_path))
def save_checkpoint(state, is_best, save, epoch, task_id):
filename = "checkpoint_{}_{}.pth.tar".format(task_id, epoch)
filename = os.path.join(save, filename)
torch.save(state, filename)
if is_best:
best_filename = os.path.join(save, 'model_best.pth.tar')
shutil.copyfile(filename, best_filename)
def load_checkpoint(model, optimizer, scheduler, architect, save, la_tracker,
epoch, task_id):
filename = "checkpoint_{}_{}.pth.tar".format(task_id, epoch)
filename = os.path.join(save, filename)
checkpoint = torch.load(filename)
model.load_state_dict(checkpoint['state_dict'])
model.alphas_normal.data = checkpoint['alphas_normal']
model.alphas_reduce.data = checkpoint['alphas_reduce']
optimizer.load_state_dict(checkpoint['optimizer'])
architect.optimizer.load_state_dict(checkpoint['arch_optimizer'])
la_tracker.ev = checkpoint['ev']
la_tracker.ev_local_avg = checkpoint['ev_local_avg']
la_tracker.genotypes = checkpoint['genotypes']
la_tracker.la_epochs = checkpoint['la_epochs']
la_tracker.la_start_idx = checkpoint['la_start_idx']
la_tracker.la_end_idx = checkpoint['la_end_idx']
lr = checkpoint['lr']
return lr
def drop_path(x, drop_prob):
if drop_prob > 0.:
keep_prob = 1.-drop_prob
mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))
x.div_(keep_prob)
x.mul_(mask)
return x
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
os.mkdir(os.path.join(path, 'scripts'))
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
shutil.copyfile(script, dst_file)
def print_args(args):
for arg, val in args.__dict__.items():
print(arg + '.' * (50 - len(arg) - len(str(val))) + str(val))
print()
| true | true |
f7140cef6c17f8b6d97948c204447446cb386e9d | 3,433 | py | Python | samples/basic/crud/gnmi/models/cisco-ios-xr/Cisco-IOS-XR-ip-domain-cfg/gn-create-xr-ip-domain-cfg-33-ydk.py | deom119/ydk-py-samples | 1ad6cc2b798f358ff835df93d12924df308b85fc | [
"Apache-2.0"
] | 104 | 2016-03-15T17:04:01.000Z | 2021-12-31T06:09:35.000Z | samples/basic/crud/gnmi/models/cisco-ios-xr/Cisco-IOS-XR-ip-domain-cfg/gn-create-xr-ip-domain-cfg-33-ydk.py | https-maxus-github-com/ydk-py-samples | 1ad6cc2b798f358ff835df93d12924df308b85fc | [
"Apache-2.0"
] | 15 | 2016-03-15T23:09:47.000Z | 2020-08-13T12:13:18.000Z | samples/basic/crud/gnmi/models/cisco-ios-xr/Cisco-IOS-XR-ip-domain-cfg/gn-create-xr-ip-domain-cfg-33-ydk.py | https-maxus-github-com/ydk-py-samples | 1ad6cc2b798f358ff835df93d12924df308b85fc | [
"Apache-2.0"
] | 87 | 2016-04-15T16:59:23.000Z | 2021-09-18T18:05:47.000Z | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create configuration for model Cisco-IOS-XR-ip-domain-cfg.
usage: gn-create-xr-ip-domain-cfg-33-ydk.py [-h] [-v] device
positional arguments:
device gNMI device (http://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.path import Repository
from ydk.services import CRUDService
from ydk.gnmi.providers import gNMIServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ip_domain_cfg \
as xr_ip_domain_cfg
import os
import logging
YDK_REPO_DIR = os.path.expanduser("~/.ydk/")
def config_ip_domain(ip_domain):
    """Add config data to ip_domain object.

    Creates VRF "RED" with domain name "red.example" and three IPv6 name
    servers; the ``order`` attribute gives the lookup preference (0 first).
    """
    vrf = ip_domain.vrfs.Vrf()
    vrf.vrf_name = "RED"
    vrf.name = "red.example"
    # Name servers in lookup order; the loop replaces three near-identical
    # copy-pasted blocks.
    name_servers = [
        "2001:db8:800a::1",
        "2001:db8:800a::2",
        "2001:db8:800a::3",
    ]
    for order, address in enumerate(name_servers):
        server = vrf.servers.Server()
        server.order = order
        server.server_address = address
        vrf.servers.server.append(server)
    ip_domain.vrfs.vrf.append(vrf)
if __name__ == "__main__":
    """Execute main program."""
    # Parse CLI: optional -v/--verbose plus the gNMI device URL.
    parser = ArgumentParser()
    parser.add_argument("-v", "--verbose", help="print debugging messages",
                        action="store_true")
    parser.add_argument("device",
                        help="gNMI device (http://user:password@host:port)")
    args = parser.parse_args()
    # Split the URL into hostname/port/username/password components.
    device = urlparse(args.device)

    # log debug messages if verbose argument specified
    if args.verbose:
        logger = logging.getLogger("ydk")
        logger.setLevel(logging.INFO)
        handler = logging.StreamHandler()
        formatter = logging.Formatter(("%(asctime)s - %(name)s - "
                                       "%(levelname)s - %(message)s"))
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    # create gNMI provider backed by a per-device local model repository
    repository = Repository(YDK_REPO_DIR+device.hostname)
    provider = gNMIServiceProvider(repo=repository,
                                   address=device.hostname,
                                   port=device.port,
                                   username=device.username,
                                   password=device.password)
    # create CRUD service
    crud = CRUDService()

    ip_domain = xr_ip_domain_cfg.IpDomain()  # create object
    config_ip_domain(ip_domain)  # add object configuration

    # create configuration on gNMI device
    crud.create(provider, ip_domain)

    exit()
# End of script
| 32.386792 | 76 | 0.665599 |
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.path import Repository
from ydk.services import CRUDService
from ydk.gnmi.providers import gNMIServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ip_domain_cfg \
as xr_ip_domain_cfg
import os
import logging
YDK_REPO_DIR = os.path.expanduser("~/.ydk/")
def config_ip_domain(ip_domain):
    """Add config data for the RED VRF to the ip_domain object.

    Sets the domain name "red.example" and registers three DNS name
    servers, numbered in lookup-preference order.

    Args:
        ip_domain: a Cisco_IOS_XR_ip_domain_cfg.IpDomain object to populate.
    """
    vrf = ip_domain.vrfs.Vrf()
    vrf.vrf_name = "RED"
    vrf.name = "red.example"
    # Name servers in lookup order; the loop replaces three copy-pasted
    # blocks that differed only in order and address.
    name_servers = ("2001:db8:800a::1",
                    "2001:db8:800a::2",
                    "2001:db8:800a::3")
    for order, address in enumerate(name_servers):
        server = vrf.servers.Server()
        server.order = order
        server.server_address = address
        vrf.servers.server.append(server)
    ip_domain.vrfs.vrf.append(vrf)
if __name__ == "__main__":
    # Parse the command line: an optional verbose flag plus the target
    # device URL in the form http://user:password@host:port.
    parser = ArgumentParser()
    parser.add_argument("-v", "--verbose", help="print debugging messages",
                        action="store_true")
    parser.add_argument("device",
                        help="gNMI device (http://user:password@host:port)")
    args = parser.parse_args()
    device = urlparse(args.device)
    # Log debug messages from the YDK library if verbose was requested.
    if args.verbose:
        logger = logging.getLogger("ydk")
        logger.setLevel(logging.INFO)
        handler = logging.StreamHandler()
        formatter = logging.Formatter(("%(asctime)s - %(name)s - "
                                       "%(levelname)s - %(message)s"))
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    # Create the gNMI service provider, caching device models in a
    # per-host local repository.
    repository = Repository(YDK_REPO_DIR+device.hostname)
    provider = gNMIServiceProvider(repo=repository,
                                   address=device.hostname,
                                   port=device.port,
                                   username=device.username,
                                   password=device.password)
    # CRUD service used to push the configuration object.
    crud = CRUDService()
    ip_domain = xr_ip_domain_cfg.IpDomain()
    config_ip_domain(ip_domain)
    # Create (write) the configuration on the gNMI device.
    crud.create(provider, ip_domain)
    exit()
| true | true |
f7140d6b0c6c816241055072b9f57a174a01a5af | 13,262 | py | Python | tests/test_zipreader.py | itsmehara/pysmi | 51347f3c4adcc030afb9bc1ded8ce72748068b1b | [
"BSD-2-Clause"
] | 121 | 2016-05-17T14:19:25.000Z | 2022-02-03T14:28:25.000Z | tests/test_zipreader.py | itsmehara/pysmi | 51347f3c4adcc030afb9bc1ded8ce72748068b1b | [
"BSD-2-Clause"
] | 61 | 2016-05-16T20:45:32.000Z | 2022-02-11T22:28:33.000Z | tests/test_zipreader.py | itsmehara/pysmi | 51347f3c4adcc030afb9bc1ded8ce72748068b1b | [
"BSD-2-Clause"
] | 50 | 2016-05-16T20:04:51.000Z | 2021-11-12T12:14:35.000Z | #
# This file is part of pysmi software.
#
# Copyright (c) 2015-2020, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysmi/license.html
#
import sys
import os
import tempfile
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
import StringIO
except ImportError:
from io import StringIO
from pysmi.reader.zipreader import ZipReader
class ZipReaderTestCase(unittest.TestCase):
zipArchive = [
80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 8, 135, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 0, 28, 0, 116, 101, 115, 116, 47, 85, 84, 9, 0, 3, 16, 211, 195, 89,
25, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80,
75, 3, 4, 10, 0, 0, 0, 0, 0, 230, 134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 12, 0, 28, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47,
85, 84, 9, 0, 3, 207, 210, 195, 89, 3, 211, 195, 89, 117, 120, 11, 0, 1, 4,
140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 230, 134,
53, 75, 102, 214, 67, 99, 2, 0, 0, 0, 2, 0, 0, 0, 17, 0, 28, 0, 116, 101, 115,
116, 47, 115, 117, 98, 100, 105, 114, 47, 116, 101, 115, 116, 65, 85, 84, 9,
0, 3, 207, 210, 195, 89, 3, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0,
0, 4, 140, 102, 0, 0, 66, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 2, 135, 53, 75,
162, 170, 2, 92, 138, 7, 0, 0, 138, 7, 0, 0, 13, 0, 28, 0, 116, 101, 115, 116,
47, 116, 101, 115, 116, 46, 122, 105, 112, 85, 84, 9, 0, 3, 3, 211, 195, 89,
3, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80,
75, 3, 4, 10, 0, 0, 0, 0, 0, 253, 134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 5, 0, 28, 0, 116, 101, 115, 116, 47, 85, 84, 9, 0, 3, 253, 210, 195, 89, 3,
211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75,
3, 4, 10, 0, 0, 0, 0, 0, 230, 134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
12, 0, 28, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 85, 84,
9, 0, 3, 207, 210, 195, 89, 3, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102,
0, 0, 4, 140, 102, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 130, 131, 53, 75,
227, 250, 30, 37, 12, 0, 0, 0, 12, 0, 0, 0, 21, 0, 28, 0, 116, 101, 115, 116,
47, 115, 117, 98, 100, 105, 114, 47, 116, 101, 115, 116, 65, 46, 116, 120,
116, 85, 84, 9, 0, 3, 116, 204, 195, 89, 134, 204, 195, 89, 117, 120, 11, 0,
1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 115, 117, 98, 100, 105, 114, 116,
101, 115, 116, 65, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 109, 131, 53, 75, 237,
78, 102, 83, 6, 0, 0, 0, 6, 0, 0, 0, 14, 0, 28, 0, 116, 101, 115, 116, 47,
116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 9, 0, 3, 78, 204, 195, 89,
134, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0,
116, 101, 115, 116, 65, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 144, 131, 53,
75, 204, 176, 61, 249, 144, 2, 0, 0, 144, 2, 0, 0, 13, 0, 28, 0, 116, 101,
115, 116, 47, 116, 101, 115, 116, 46, 122, 105, 112, 85, 84, 9, 0, 3, 143,
204, 195, 89, 143, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4,
140, 102, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 117, 131, 53, 75, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 28, 0, 116, 101, 115, 116, 47, 85, 84, 9, 0,
3, 94, 204, 195, 89, 98, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0,
4, 140, 102, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 130, 131, 53, 75, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 28, 0, 116, 101, 115, 116, 47, 115, 117,
98, 100, 105, 114, 47, 85, 84, 9, 0, 3, 116, 204, 195, 89, 134, 204, 195,
89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 3, 4,
10, 0, 0, 0, 0, 0, 130, 131, 53, 75, 227, 250, 30, 37, 12, 0, 0, 0, 12, 0, 0,
0, 21, 0, 28, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 116,
101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 9, 0, 3, 116, 204, 195, 89, 116,
204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 115,
117, 98, 100, 105, 114, 116, 101, 115, 116, 65, 10, 80, 75, 3, 4, 10, 0, 0, 0,
0, 0, 109, 131, 53, 75, 237, 78, 102, 83, 6, 0, 0, 0, 6, 0, 0, 0, 14, 0, 28,
0, 116, 101, 115, 116, 47, 116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84,
9, 0, 3, 78, 204, 195, 89, 78, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102,
0, 0, 4, 140, 102, 0, 0, 116, 101, 115, 116, 65, 10, 80, 75, 1, 2, 30, 3, 10,
0, 0, 0, 0, 0, 117, 131, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 24,
0, 0, 0, 0, 0, 0, 0, 16, 0, 253, 65, 0, 0, 0, 0, 116, 101, 115, 116, 47, 85,
84, 5, 0, 3, 94, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140,
102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 130, 131, 53, 75, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16, 0, 253, 65, 63,
0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 85, 84, 5,
0, 3, 116, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102,
0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 130, 131, 53, 75, 227, 250, 30,
37, 12, 0, 0, 0, 12, 0, 0, 0, 21, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129,
133, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 116,
101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 5, 0, 3, 116, 204, 195, 89, 117,
120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10,
0, 0, 0, 0, 0, 109, 131, 53, 75, 237, 78, 102, 83, 6, 0, 0, 0, 6, 0, 0, 0, 14,
0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 224, 0, 0, 0, 116, 101, 115, 116,
47, 116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 5, 0, 3, 78, 204, 195,
89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 5, 6, 0,
0, 0, 0, 4, 0, 4, 0, 76, 1, 0, 0, 46, 1, 0, 0, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0,
0, 0, 230, 134, 53, 75, 102, 214, 67, 99, 2, 0, 0, 0, 2, 0, 0, 0, 17, 0, 28, 0,
116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 116, 101, 115, 116,
65, 85, 84, 9, 0, 3, 207, 210, 195, 89, 207, 210, 195, 89, 117, 120, 11, 0, 1,
4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 66, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0,
0, 253, 134, 53, 75, 39, 231, 88, 122, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 28, 0,
116, 101, 115, 116, 47, 116, 101, 115, 116, 67, 85, 84, 9, 0, 3, 253, 210,
195, 89, 253, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140,
102, 0, 0, 67, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 211, 134, 53, 75, 165,
133, 110, 72, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 28, 0, 116, 101, 115, 116, 47,
116, 101, 115, 116, 65, 85, 84, 9, 0, 3, 173, 210, 195, 89, 173, 210, 195, 89,
117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 65, 10, 80, 75, 1,
2, 30, 3, 10, 0, 0, 0, 0, 0, 253, 134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 5, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16, 0, 253, 65, 0, 0, 0, 0, 116, 101, 115,
116, 47, 85, 84, 5, 0, 3, 253, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102,
0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 230, 134, 53,
75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16,
0, 253, 65, 63, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114,
47, 85, 84, 5, 0, 3, 207, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0,
0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 130, 131, 53,
75, 227, 250, 30, 37, 12, 0, 0, 0, 12, 0, 0, 0, 21, 0, 24, 0, 0, 0, 0, 0, 1,
0, 0, 0, 180, 129, 133, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100,
105, 114, 47, 116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 5, 0, 3, 116,
204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80,
75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 109, 131, 53, 75, 237, 78, 102, 83, 6, 0,
0, 0, 6, 0, 0, 0, 14, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 224, 0, 0,
0, 116, 101, 115, 116, 47, 116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84,
5, 0, 3, 78, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102,
0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 144, 131, 53, 75, 204, 176, 61,
249, 144, 2, 0, 0, 144, 2, 0, 0, 13, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180,
129, 46, 1, 0, 0, 116, 101, 115, 116, 47, 116, 101, 115, 116, 46, 122, 105,
112, 85, 84, 5, 0, 3, 143, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0,
0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 230, 134, 53, 75,
102, 214, 67, 99, 2, 0, 0, 0, 2, 0, 0, 0, 17, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0,
0, 180, 129, 5, 4, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114,
47, 116, 101, 115, 116, 65, 85, 84, 5, 0, 3, 207, 210, 195, 89, 117, 120, 11,
0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0,
0, 0, 253, 134, 53, 75, 39, 231, 88, 122, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 24,
0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 82, 4, 0, 0, 116, 101, 115, 116, 47, 116,
101, 115, 116, 67, 85, 84, 5, 0, 3, 253, 210, 195, 89, 117, 120, 11, 0, 1, 4,
140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0,
211, 134, 53, 75, 165, 133, 110, 72, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 24, 0, 0,
0, 0, 0, 1, 0, 0, 0, 180, 129, 152, 4, 0, 0, 116, 101, 115, 116, 47, 116, 101,
115, 116, 65, 85, 84, 5, 0, 3, 173, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140,
102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 5, 6, 0, 0, 0, 0, 8, 0, 8, 0, 150, 2,
0, 0, 222, 4, 0, 0, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 211, 134, 53, 75,
165, 133, 110, 72, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 28, 0, 116, 101, 115, 116,
47, 116, 101, 115, 116, 65, 85, 84, 9, 0, 3, 173, 210, 195, 89, 3, 211, 195,
89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 65, 10, 80, 75,
1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 8, 135, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16, 0, 253, 65, 0, 0, 0, 0, 116, 101,
115, 116, 47, 85, 84, 5, 0, 3, 16, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140,
102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 230,
134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 24, 0, 0, 0, 0, 0, 0,
0, 16, 0, 253, 65, 63, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100,
105, 114, 47, 85, 84, 5, 0, 3, 207, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140,
102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 230,
134, 53, 75, 102, 214, 67, 99, 2, 0, 0, 0, 2, 0, 0, 0, 17, 0, 24, 0, 0, 0, 0,
0, 1, 0, 0, 0, 180, 129, 133, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98,
100, 105, 114, 47, 116, 101, 115, 116, 65, 85, 84, 5, 0, 3, 207, 210, 195,
89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2,
30, 3, 10, 0, 0, 0, 0, 0, 2, 135, 53, 75, 162, 170, 2, 92, 138, 7, 0, 0, 138,
7, 0, 0, 13, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180, 129, 210, 0, 0, 0, 116,
101, 115, 116, 47, 116, 101, 115, 116, 46, 122, 105, 112, 85, 84, 5, 0, 3,
3, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0,
80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 211, 134, 53, 75, 165, 133, 110, 72,
2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 163,
8, 0, 0, 116, 101, 115, 116, 47, 116, 101, 115, 116, 65, 85, 84, 5, 0, 3,
173, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0,
80, 75, 5, 6, 0, 0, 0, 0, 5, 0, 5, 0, 151, 1, 0, 0, 233, 8, 0, 0, 0, 0]
if sys.version_info[0] < 3:
zipContents = ''.join([chr(x) for x in zipArchive])
else:
zipContents = bytes(zipArchive)
def testGetDataFromFile(self):
filename = None
try:
fd, filename = tempfile.mkstemp()
os.write(fd, self.zipContents)
os.close(fd)
zipReader = ZipReader(filename)
mibinfo, data = zipReader.getData('testA')
assert data == 'A\n'
except Exception:
pass
if filename:
try:
os.remove(filename)
except Exception:
pass
def testGetInnerZipData(self):
filename = None
try:
fd, filename = tempfile.mkstemp()
os.write(fd, self.zipContents)
os.close(fd)
zipReader = ZipReader(filename)
mibinfo, data = zipReader.getData('testC')
assert data == 'C\n'
except Exception:
pass
if filename:
try:
os.remove(filename)
except Exception:
pass
# Collect every test in this module so the file can be run directly.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite)
| 59.205357 | 87 | 0.454833 |
import sys
import os
import tempfile
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
import StringIO
except ImportError:
from io import StringIO
from pysmi.reader.zipreader import ZipReader
class ZipReaderTestCase(unittest.TestCase):
zipArchive = [
80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 8, 135, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 0, 28, 0, 116, 101, 115, 116, 47, 85, 84, 9, 0, 3, 16, 211, 195, 89,
25, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80,
75, 3, 4, 10, 0, 0, 0, 0, 0, 230, 134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 12, 0, 28, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47,
85, 84, 9, 0, 3, 207, 210, 195, 89, 3, 211, 195, 89, 117, 120, 11, 0, 1, 4,
140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 230, 134,
53, 75, 102, 214, 67, 99, 2, 0, 0, 0, 2, 0, 0, 0, 17, 0, 28, 0, 116, 101, 115,
116, 47, 115, 117, 98, 100, 105, 114, 47, 116, 101, 115, 116, 65, 85, 84, 9,
0, 3, 207, 210, 195, 89, 3, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0,
0, 4, 140, 102, 0, 0, 66, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 2, 135, 53, 75,
162, 170, 2, 92, 138, 7, 0, 0, 138, 7, 0, 0, 13, 0, 28, 0, 116, 101, 115, 116,
47, 116, 101, 115, 116, 46, 122, 105, 112, 85, 84, 9, 0, 3, 3, 211, 195, 89,
3, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80,
75, 3, 4, 10, 0, 0, 0, 0, 0, 253, 134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 5, 0, 28, 0, 116, 101, 115, 116, 47, 85, 84, 9, 0, 3, 253, 210, 195, 89, 3,
211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75,
3, 4, 10, 0, 0, 0, 0, 0, 230, 134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
12, 0, 28, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 85, 84,
9, 0, 3, 207, 210, 195, 89, 3, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102,
0, 0, 4, 140, 102, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 130, 131, 53, 75,
227, 250, 30, 37, 12, 0, 0, 0, 12, 0, 0, 0, 21, 0, 28, 0, 116, 101, 115, 116,
47, 115, 117, 98, 100, 105, 114, 47, 116, 101, 115, 116, 65, 46, 116, 120,
116, 85, 84, 9, 0, 3, 116, 204, 195, 89, 134, 204, 195, 89, 117, 120, 11, 0,
1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 115, 117, 98, 100, 105, 114, 116,
101, 115, 116, 65, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 109, 131, 53, 75, 237,
78, 102, 83, 6, 0, 0, 0, 6, 0, 0, 0, 14, 0, 28, 0, 116, 101, 115, 116, 47,
116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 9, 0, 3, 78, 204, 195, 89,
134, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0,
116, 101, 115, 116, 65, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 144, 131, 53,
75, 204, 176, 61, 249, 144, 2, 0, 0, 144, 2, 0, 0, 13, 0, 28, 0, 116, 101,
115, 116, 47, 116, 101, 115, 116, 46, 122, 105, 112, 85, 84, 9, 0, 3, 143,
204, 195, 89, 143, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4,
140, 102, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 117, 131, 53, 75, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 28, 0, 116, 101, 115, 116, 47, 85, 84, 9, 0,
3, 94, 204, 195, 89, 98, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0,
4, 140, 102, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 130, 131, 53, 75, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 28, 0, 116, 101, 115, 116, 47, 115, 117,
98, 100, 105, 114, 47, 85, 84, 9, 0, 3, 116, 204, 195, 89, 134, 204, 195,
89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 3, 4,
10, 0, 0, 0, 0, 0, 130, 131, 53, 75, 227, 250, 30, 37, 12, 0, 0, 0, 12, 0, 0,
0, 21, 0, 28, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 116,
101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 9, 0, 3, 116, 204, 195, 89, 116,
204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 115,
117, 98, 100, 105, 114, 116, 101, 115, 116, 65, 10, 80, 75, 3, 4, 10, 0, 0, 0,
0, 0, 109, 131, 53, 75, 237, 78, 102, 83, 6, 0, 0, 0, 6, 0, 0, 0, 14, 0, 28,
0, 116, 101, 115, 116, 47, 116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84,
9, 0, 3, 78, 204, 195, 89, 78, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102,
0, 0, 4, 140, 102, 0, 0, 116, 101, 115, 116, 65, 10, 80, 75, 1, 2, 30, 3, 10,
0, 0, 0, 0, 0, 117, 131, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 24,
0, 0, 0, 0, 0, 0, 0, 16, 0, 253, 65, 0, 0, 0, 0, 116, 101, 115, 116, 47, 85,
84, 5, 0, 3, 94, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140,
102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 130, 131, 53, 75, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16, 0, 253, 65, 63,
0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 85, 84, 5,
0, 3, 116, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102,
0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 130, 131, 53, 75, 227, 250, 30,
37, 12, 0, 0, 0, 12, 0, 0, 0, 21, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129,
133, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 116,
101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 5, 0, 3, 116, 204, 195, 89, 117,
120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10,
0, 0, 0, 0, 0, 109, 131, 53, 75, 237, 78, 102, 83, 6, 0, 0, 0, 6, 0, 0, 0, 14,
0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 224, 0, 0, 0, 116, 101, 115, 116,
47, 116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 5, 0, 3, 78, 204, 195,
89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 5, 6, 0,
0, 0, 0, 4, 0, 4, 0, 76, 1, 0, 0, 46, 1, 0, 0, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0,
0, 0, 230, 134, 53, 75, 102, 214, 67, 99, 2, 0, 0, 0, 2, 0, 0, 0, 17, 0, 28, 0,
116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114, 47, 116, 101, 115, 116,
65, 85, 84, 9, 0, 3, 207, 210, 195, 89, 207, 210, 195, 89, 117, 120, 11, 0, 1,
4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 66, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0,
0, 253, 134, 53, 75, 39, 231, 88, 122, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 28, 0,
116, 101, 115, 116, 47, 116, 101, 115, 116, 67, 85, 84, 9, 0, 3, 253, 210,
195, 89, 253, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140,
102, 0, 0, 67, 10, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 211, 134, 53, 75, 165,
133, 110, 72, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 28, 0, 116, 101, 115, 116, 47,
116, 101, 115, 116, 65, 85, 84, 9, 0, 3, 173, 210, 195, 89, 173, 210, 195, 89,
117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 65, 10, 80, 75, 1,
2, 30, 3, 10, 0, 0, 0, 0, 0, 253, 134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 5, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16, 0, 253, 65, 0, 0, 0, 0, 116, 101, 115,
116, 47, 85, 84, 5, 0, 3, 253, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102,
0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 230, 134, 53,
75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16,
0, 253, 65, 63, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114,
47, 85, 84, 5, 0, 3, 207, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0,
0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 130, 131, 53,
75, 227, 250, 30, 37, 12, 0, 0, 0, 12, 0, 0, 0, 21, 0, 24, 0, 0, 0, 0, 0, 1,
0, 0, 0, 180, 129, 133, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100,
105, 114, 47, 116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84, 5, 0, 3, 116,
204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80,
75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 109, 131, 53, 75, 237, 78, 102, 83, 6, 0,
0, 0, 6, 0, 0, 0, 14, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 224, 0, 0,
0, 116, 101, 115, 116, 47, 116, 101, 115, 116, 65, 46, 116, 120, 116, 85, 84,
5, 0, 3, 78, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102,
0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 144, 131, 53, 75, 204, 176, 61,
249, 144, 2, 0, 0, 144, 2, 0, 0, 13, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180,
129, 46, 1, 0, 0, 116, 101, 115, 116, 47, 116, 101, 115, 116, 46, 122, 105,
112, 85, 84, 5, 0, 3, 143, 204, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0,
0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 230, 134, 53, 75,
102, 214, 67, 99, 2, 0, 0, 0, 2, 0, 0, 0, 17, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0,
0, 180, 129, 5, 4, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100, 105, 114,
47, 116, 101, 115, 116, 65, 85, 84, 5, 0, 3, 207, 210, 195, 89, 117, 120, 11,
0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0,
0, 0, 253, 134, 53, 75, 39, 231, 88, 122, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 24,
0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 82, 4, 0, 0, 116, 101, 115, 116, 47, 116,
101, 115, 116, 67, 85, 84, 5, 0, 3, 253, 210, 195, 89, 117, 120, 11, 0, 1, 4,
140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0,
211, 134, 53, 75, 165, 133, 110, 72, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 24, 0, 0,
0, 0, 0, 1, 0, 0, 0, 180, 129, 152, 4, 0, 0, 116, 101, 115, 116, 47, 116, 101,
115, 116, 65, 85, 84, 5, 0, 3, 173, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140,
102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 5, 6, 0, 0, 0, 0, 8, 0, 8, 0, 150, 2,
0, 0, 222, 4, 0, 0, 0, 0, 80, 75, 3, 4, 10, 0, 0, 0, 0, 0, 211, 134, 53, 75,
165, 133, 110, 72, 2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 28, 0, 116, 101, 115, 116,
47, 116, 101, 115, 116, 65, 85, 84, 9, 0, 3, 173, 210, 195, 89, 3, 211, 195,
89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 65, 10, 80, 75,
1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 8, 135, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 5, 0, 24, 0, 0, 0, 0, 0, 0, 0, 16, 0, 253, 65, 0, 0, 0, 0, 116, 101,
115, 116, 47, 85, 84, 5, 0, 3, 16, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140,
102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 230,
134, 53, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 24, 0, 0, 0, 0, 0, 0,
0, 16, 0, 253, 65, 63, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98, 100,
105, 114, 47, 85, 84, 5, 0, 3, 207, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140,
102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 230,
134, 53, 75, 102, 214, 67, 99, 2, 0, 0, 0, 2, 0, 0, 0, 17, 0, 24, 0, 0, 0, 0,
0, 1, 0, 0, 0, 180, 129, 133, 0, 0, 0, 116, 101, 115, 116, 47, 115, 117, 98,
100, 105, 114, 47, 116, 101, 115, 116, 65, 85, 84, 5, 0, 3, 207, 210, 195,
89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0, 80, 75, 1, 2,
30, 3, 10, 0, 0, 0, 0, 0, 2, 135, 53, 75, 162, 170, 2, 92, 138, 7, 0, 0, 138,
7, 0, 0, 13, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180, 129, 210, 0, 0, 0, 116,
101, 115, 116, 47, 116, 101, 115, 116, 46, 122, 105, 112, 85, 84, 5, 0, 3,
3, 211, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0,
80, 75, 1, 2, 30, 3, 10, 0, 0, 0, 0, 0, 211, 134, 53, 75, 165, 133, 110, 72,
2, 0, 0, 0, 2, 0, 0, 0, 10, 0, 24, 0, 0, 0, 0, 0, 1, 0, 0, 0, 180, 129, 163,
8, 0, 0, 116, 101, 115, 116, 47, 116, 101, 115, 116, 65, 85, 84, 5, 0, 3,
173, 210, 195, 89, 117, 120, 11, 0, 1, 4, 140, 102, 0, 0, 4, 140, 102, 0, 0,
80, 75, 5, 6, 0, 0, 0, 0, 5, 0, 5, 0, 151, 1, 0, 0, 233, 8, 0, 0, 0, 0]
if sys.version_info[0] < 3:
zipContents = ''.join([chr(x) for x in zipArchive])
else:
zipContents = bytes(zipArchive)
    def testGetDataFromFile(self):
        """Check that getData() returns the payload of a plain member file.

        NOTE(review): the blanket ``except Exception: pass`` below also
        swallows the assertion failure, so this test can never actually
        fail -- confirm whether that is intentional.
        """
        filename = None
        try:
            fd, filename = tempfile.mkstemp()
            os.write(fd, self.zipContents)
            os.close(fd)
            zipReader = ZipReader(filename)
            mibinfo, data = zipReader.getData('testA')
            assert data == 'A\n'
        except Exception:
            pass
        # Remove the temporary archive, ignoring any cleanup errors.
        if filename:
            try:
                os.remove(filename)
            except Exception:
                pass
    def testGetInnerZipData(self):
        """Check that getData() finds members of a ZIP nested in the ZIP.

        'testC' exists only inside the inner ``test/test.zip`` member.
        NOTE(review): the blanket ``except Exception: pass`` below also
        swallows the assertion failure, so this test can never actually
        fail -- confirm whether that is intentional.
        """
        filename = None
        try:
            fd, filename = tempfile.mkstemp()
            os.write(fd, self.zipContents)
            os.close(fd)
            zipReader = ZipReader(filename)
            mibinfo, data = zipReader.getData('testC')
            assert data == 'C\n'
        except Exception:
            pass
        # Remove the temporary archive, ignoring any cleanup errors.
        if filename:
            try:
                os.remove(filename)
            except Exception:
                pass
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| true | true |
f7140dc72de40d4e3c5a116bf6731262aa4867da | 4,343 | py | Python | multiaddr/multiaddr.py | DalavanCloud/py-multiaddr | e1cce8ebacc6f25d0aadc0a913edb5ae2be3ee73 | [
"Apache-2.0",
"MIT"
] | 1 | 2018-12-24T22:07:47.000Z | 2018-12-24T22:07:47.000Z | multiaddr/multiaddr.py | DalavanCloud/py-multiaddr | e1cce8ebacc6f25d0aadc0a913edb5ae2be3ee73 | [
"Apache-2.0",
"MIT"
] | null | null | null | multiaddr/multiaddr.py | DalavanCloud/py-multiaddr | e1cce8ebacc6f25d0aadc0a913edb5ae2be3ee73 | [
"Apache-2.0",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import binascii
from copy import copy
from .codec import size_for_addr
from .codec import string_to_bytes
from .codec import bytes_to_string
from .codec import protocol_with_name
from .protocols import protocol_with_code
from .protocols import read_varint_code
class ProtocolNotFoundException(Exception):
    """Raised when a Multiaddr does not contain the requested protocol."""
    pass
class Multiaddr(object):
    """Multiaddr is a representation of multiple nested internet addresses.

    Multiaddr is a cross-protocol, cross-platform format for representing
    internet addresses. It emphasizes explicitness and self-description.

    Learn more here: https://github.com/jbenet/multiaddr

    Multiaddrs have both a binary and string representation.

    >>> from multiaddr import Multiaddr
    >>> addr = Multiaddr("/ip4/1.2.3.4/tcp/80")

    Multiaddr objects are immutable, so `encapsulate` and `decapsulate`
    return new objects rather than modify internal state.
    """

    def __init__(self, addr):
        """Instantiate a new Multiaddr.

        Args:
            addr : A string-encoded or a byte-encoded Multiaddr

        Raises:
            ValueError: if addr is neither str nor bytes.
        """
        if isinstance(addr, str):
            self._bytes = string_to_bytes(addr)
        elif isinstance(addr, bytes):
            self._bytes = addr
        else:
            raise ValueError("Invalid address type, must be bytes or str")

    def __eq__(self, other):
        """Checks if two Multiaddr objects are exactly equal."""
        if not isinstance(other, Multiaddr):
            # Defer to the other operand instead of raising AttributeError
            # on a missing ``_bytes`` attribute.
            return NotImplemented
        return self._bytes == other._bytes

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Multiaddr is documented as immutable, so hashing the underlying
        # byte representation keeps the __eq__/__hash__ contract and makes
        # instances usable as dict keys / set members (defining __eq__
        # alone would leave the class unhashable on Python 3).
        return hash(self._bytes)

    def __str__(self):
        """Return the string representation of this Multiaddr.

        May raise an exception if the internal state of the Multiaddr is
        corrupted."""
        try:
            return bytes_to_string(self._bytes)
        except Exception:
            raise ValueError(
                "multiaddr failed to convert back to string. corrupted?")

    def __repr__(self):
        return "<Multiaddr %s>" % str(self)

    def to_bytes(self):
        """Returns the byte array representation of this Multiaddr."""
        return self._bytes

    def protocols(self):
        """Returns a list of Protocols this Multiaddr includes."""
        # _bytes holds a hex encoding; decode it before walking the
        # (varint protocol code, address payload) sequence.
        buf = binascii.unhexlify(self.to_bytes())
        protos = []
        while buf:
            code, num_bytes_read = read_varint_code(buf)
            proto = protocol_with_code(code)
            protos.append(proto)
            buf = buf[num_bytes_read:]
            # Skip over this protocol's address payload.
            size = size_for_addr(proto, buf)
            buf = buf[size:]
        return protos

    def encapsulate(self, other):
        """Wrap this Multiaddr around another.

        For example:
            /ip4/1.2.3.4 encapsulate /tcp/80 = /ip4/1.2.3.4/tcp/80
        """
        mb = self.to_bytes()
        ob = other.to_bytes()
        return Multiaddr(b''.join([mb, ob]))

    def decapsulate(self, other):
        """Remove a Multiaddr wrapping.

        For example:
            /ip4/1.2.3.4/tcp/80 decapsulate /ip4/1.2.3.4 = /tcp/80
        """
        s1 = str(self)
        s2 = str(other)
        try:
            idx = s1.rindex(s2)
        except ValueError:
            # if multiaddr not contained, returns a copy
            return copy(self)
        try:
            return Multiaddr(s1[:idx])
        except Exception as ex:
            raise ValueError(
                "Multiaddr.decapsulate incorrect byte boundaries: %s"
                % str(ex))

    def value_for_protocol(self, code):
        """Return the value (if any) following the specified protocol.

        Raises:
            ProtocolNotFoundException: if this Multiaddr does not include
                the requested protocol.
        """
        # Imported here to avoid a circular import with .util.
        from .util import split
        if isinstance(code, str):
            protocol = protocol_with_name(code)
            code = protocol.code
        for sub_addr in split(self):
            if sub_addr.protocols()[0].code == code:
                addr_parts = str(sub_addr).split("/")
                if len(addr_parts) > 3:
                    raise ValueError("Unknown Protocol format")
                elif len(addr_parts) == 3:
                    # If we have an address, return it
                    return addr_parts[2]
                elif len(addr_parts) == 2:
                    # We were given something like '/utp', which doesn't
                    # have an address, so return ''
                    return ''
        raise ProtocolNotFoundException()
| 31.70073 | 77 | 0.593599 |
import binascii
from copy import copy
from .codec import size_for_addr
from .codec import string_to_bytes
from .codec import bytes_to_string
from .codec import protocol_with_name
from .protocols import protocol_with_code
from .protocols import read_varint_code
class ProtocolNotFoundException(Exception):
pass
class Multiaddr(object):
def __init__(self, addr):
if isinstance(addr, str):
self._bytes = string_to_bytes(addr)
elif isinstance(addr, bytes):
self._bytes = addr
else:
raise ValueError("Invalid address type, must be bytes or str")
def __eq__(self, other):
return self._bytes == other._bytes
def __ne__(self, other):
return not (self == other)
def __str__(self):
try:
return bytes_to_string(self._bytes)
except Exception:
raise ValueError(
"multiaddr failed to convert back to string. corrupted?")
def __repr__(self):
return "<Multiaddr %s>" % str(self)
def to_bytes(self):
return self._bytes
def protocols(self):
buf = binascii.unhexlify(self.to_bytes())
protos = []
while buf:
code, num_bytes_read = read_varint_code(buf)
proto = protocol_with_code(code)
protos.append(proto)
buf = buf[num_bytes_read:]
size = size_for_addr(proto, buf)
buf = buf[size:]
return protos
def encapsulate(self, other):
mb = self.to_bytes()
ob = other.to_bytes()
return Multiaddr(b''.join([mb, ob]))
def decapsulate(self, other):
s1 = str(self)
s2 = str(other)
try:
idx = s1.rindex(s2)
except ValueError:
return copy(self)
try:
return Multiaddr(s1[:idx])
except Exception as ex:
raise ValueError(
"Multiaddr.decapsulate incorrect byte boundaries: %s"
% str(ex))
def value_for_protocol(self, code):
from .util import split
if isinstance(code, str):
protocol = protocol_with_name(code)
code = protocol.code
for sub_addr in split(self):
if sub_addr.protocols()[0].code == code:
addr_parts = str(sub_addr).split("/")
if len(addr_parts) > 3:
raise ValueError("Unknown Protocol format")
elif len(addr_parts) == 3:
return addr_parts[2]
elif len(addr_parts) == 2:
# an address, so return ''
return ''
raise ProtocolNotFoundException()
| true | true |
f7140dda44b8773a6814740e7a397d7b4ea099ad | 223 | py | Python | profiles/urls.py | ErnestaMajute/lucentCavern | a98253c5192456637f7af6400eee39cf5363d838 | [
"W3C",
"PostgreSQL"
] | null | null | null | profiles/urls.py | ErnestaMajute/lucentCavern | a98253c5192456637f7af6400eee39cf5363d838 | [
"W3C",
"PostgreSQL"
] | null | null | null | profiles/urls.py | ErnestaMajute/lucentCavern | a98253c5192456637f7af6400eee39cf5363d838 | [
"W3C",
"PostgreSQL"
] | 1 | 2021-07-19T14:29:15.000Z | 2021-07-19T14:29:15.000Z | from django.urls import path
from . import views
urlpatterns = [
path('', views.profile, name='profile'),
path(
'order_history/<order_number>',
views.order_history,
name='order_history'),
]
| 20.272727 | 44 | 0.632287 | from django.urls import path
from . import views
urlpatterns = [
path('', views.profile, name='profile'),
path(
'order_history/<order_number>',
views.order_history,
name='order_history'),
]
| true | true |
f7140efb0d92fdd73416da8a46ac3e435a8a01e6 | 3,647 | py | Python | autosklearn/data/abstract_data_manager.py | tuggeluk/auto-sklearn | 202918e5641701c696b995039d06bfec81973cc6 | [
"BSD-3-Clause"
] | 1 | 2017-08-13T13:57:40.000Z | 2017-08-13T13:57:40.000Z | autosklearn/data/abstract_data_manager.py | chrinide/auto-sklearn | 1c6af59ff61f1d0a3b54b16a35ffbc5d2d3828cd | [
"BSD-3-Clause"
] | null | null | null | autosklearn/data/abstract_data_manager.py | chrinide/auto-sklearn | 1c6af59ff61f1d0a3b54b16a35ffbc5d2d3828cd | [
"BSD-3-Clause"
] | 1 | 2020-05-06T14:47:17.000Z | 2020-05-06T14:47:17.000Z | # -*- encoding: utf-8 -*-
import abc
import numpy as np
import scipy.sparse
from autosklearn.pipeline.implementations.OneHotEncoder import OneHotEncoder
from autosklearn.util import predict_RAM_usage
def perform_one_hot_encoding(sparse, categorical, data):
    """One-hot encode the categorical columns of every array in *data*.

    The encoder is fit on ``data[0]`` (the training split) and applied to
    the remaining arrays.  If the predicted dense memory footprint exceeds
    ~1 GB, a sparse representation is forced regardless of *sparse*.

    Returns ``(transformed_data, sparse)``, where the returned *sparse*
    reflects whether the output actually ended up sparse.
    """
    # Predicted dense RAM usage in MB; force sparse output above ~1 GB.
    predicted_mb = float(predict_RAM_usage(data[0], categorical)) / 1024 / 1024
    if predicted_mb > 1000:
        sparse = True

    if not any(categorical):
        # No categorical columns: hand the data back untouched.
        return data, sparse

    encoder = OneHotEncoder(categorical_features=categorical,
                            dtype=np.float32,
                            sparse=sparse)
    transformed = [encoder.fit_transform(data[0])]
    transformed.extend(encoder.transform(d) for d in data[1:])

    # The encoder can still emit sparse matrices even when dense output was
    # requested; densify in that case so the caller gets what it asked for.
    if not sparse and scipy.sparse.issparse(transformed[0]):
        transformed = [t.todense() for t in transformed]
    return transformed, sparse
class AbstractDataManager():
    """Base container for a dataset's arrays, metadata and feature types.

    Concrete subclasses populate ``data`` (split name -> array, e.g.
    'X_train'), ``info`` (metadata such as 'is_sparse', 'has_missing') and
    ``feat_type`` (per-column type strings such as 'categorical').

    NOTE(review): ``__metaclass__`` is the Python 2 spelling; under
    Python 3 it is silently ignored, so ABCMeta is not actually applied
    there (harmless here since no abstract methods are declared).
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, name):
        # Filled in by concrete subclasses.
        self._data = dict()
        self._info = dict()
        self._name = name

    @property
    def name(self):
        """Dataset name."""
        return self._name

    @property
    def data(self):
        """Mapping of split name (e.g. 'X_train') to its array."""
        return self._data

    @property
    def info(self):
        """Dataset metadata dict (e.g. 'is_sparse', 'has_missing')."""
        return self._info

    @property
    def feat_type(self):
        """Per-column feature type strings; set externally before encoding."""
        return self._feat_type

    @feat_type.setter
    def feat_type(self, value):
        self._feat_type = value

    @property
    def encoder(self):
        """One-hot encoder associated with this dataset; set externally."""
        return self._encoder

    @encoder.setter
    def encoder(self, value):
        self._encoder = value

    def perform1HotEncoding(self):
        """One-hot encode categorical columns of all splits in place.

        Updates ``data['X_train'/'X_valid'/'X_test']`` and refreshes
        ``info['is_sparse']`` to reflect the resulting representation.
        """
        sparse = True if self.info['is_sparse'] == 1 else False
        has_missing = True if self.info['has_missing'] else False
        to_encode = ['categorical']
        if has_missing:
            # With missing values present, binary columns are encoded too.
            to_encode += ['binary']
        encoding_mask = [feat_type.lower() in to_encode
                         for feat_type in self.feat_type]
        # Collect the splits in a fixed order: train first (the encoder is
        # fit on it), then valid, then test, whichever are present.
        data = [self.data['X_train']]
        if 'X_valid' in self.data:
            data.append(self.data['X_valid'])
        if 'X_test' in self.data:
            data.append(self.data['X_test'])
        data, sparse = perform_one_hot_encoding(
            sparse=sparse, categorical=encoding_mask,
            data=data)
        self.info['is_sparse'] = 1 if sparse else 0
        # Write the transformed splits back in the same order they were
        # collected above.
        self.data['X_train'] = data[0]
        if 'X_valid' in self.data and 'X_test' in self.data:
            self.data['X_valid'] = data[1]
            self.data['X_test'] = data[2]
        elif 'X_valid' in self.data:
            self.data['X_valid'] = data[1]
        elif 'X_test' in self.data:
            self.data['X_test'] = data[1]

    def __repr__(self):
        return 'DataManager : ' + self.name

    def __str__(self):
        """Multi-line dump: info entries, per-split shape/dtype (plus
        density for sparse matrices) and the feature types."""
        val = 'DataManager : ' + self.name + '\ninfo:\n'
        for item in self.info:
            val = val + '\t' + item + ' = ' + str(self.info[item]) + '\n'
        val = val + 'data:\n'
        for subset in self.data:
            val = val + '\t%s = %s %s %s\n' % (subset, type(self.data[subset]),
                                               str(self.data[subset].shape),
                                               str(self.data[subset].dtype))
            if isinstance(self.data[subset], scipy.sparse.spmatrix):
                val = val + '\tdensity: %f\n' % \
                      (float(len(self.data[subset].data)) /
                       self.data[subset].shape[0] /
                       self.data[subset].shape[1])
        val = val + 'feat_type:\t' + str(self.feat_type) + '\n'
        return val
| 30.391667 | 79 | 0.550315 |
import abc
import numpy as np
import scipy.sparse
from autosklearn.pipeline.implementations.OneHotEncoder import OneHotEncoder
from autosklearn.util import predict_RAM_usage
def perform_one_hot_encoding(sparse, categorical, data):
predicted_RAM_usage = float(
predict_RAM_usage(data[0], categorical)) / 1024 / 1024
if predicted_RAM_usage > 1000:
sparse = True
rvals = []
if any(categorical):
encoder = OneHotEncoder(categorical_features=categorical,
dtype=np.float32,
sparse=sparse)
rvals.append(encoder.fit_transform(data[0]))
for d in data[1:]:
rvals.append(encoder.transform(d))
if not sparse and scipy.sparse.issparse(rvals[0]):
for i in range(len(rvals)):
rvals[i] = rvals[i].todense()
else:
rvals = data
return rvals, sparse
class AbstractDataManager():
__metaclass__ = abc.ABCMeta
def __init__(self, name):
self._data = dict()
self._info = dict()
self._name = name
@property
def name(self):
return self._name
@property
def data(self):
return self._data
@property
def info(self):
return self._info
@property
def feat_type(self):
return self._feat_type
@feat_type.setter
def feat_type(self, value):
self._feat_type = value
@property
def encoder(self):
return self._encoder
@encoder.setter
def encoder(self, value):
self._encoder = value
def perform1HotEncoding(self):
sparse = True if self.info['is_sparse'] == 1 else False
has_missing = True if self.info['has_missing'] else False
to_encode = ['categorical']
if has_missing:
to_encode += ['binary']
encoding_mask = [feat_type.lower() in to_encode
for feat_type in self.feat_type]
data = [self.data['X_train']]
if 'X_valid' in self.data:
data.append(self.data['X_valid'])
if 'X_test' in self.data:
data.append(self.data['X_test'])
data, sparse = perform_one_hot_encoding(
sparse=sparse, categorical=encoding_mask,
data=data)
self.info['is_sparse'] = 1 if sparse else 0
self.data['X_train'] = data[0]
if 'X_valid' in self.data and 'X_test' in self.data:
self.data['X_valid'] = data[1]
self.data['X_test'] = data[2]
elif 'X_valid' in self.data:
self.data['X_valid'] = data[1]
elif 'X_test' in self.data:
self.data['X_test'] = data[1]
def __repr__(self):
return 'DataManager : ' + self.name
def __str__(self):
val = 'DataManager : ' + self.name + '\ninfo:\n'
for item in self.info:
val = val + '\t' + item + ' = ' + str(self.info[item]) + '\n'
val = val + 'data:\n'
for subset in self.data:
val = val + '\t%s = %s %s %s\n' % (subset, type(self.data[subset]),
str(self.data[subset].shape),
str(self.data[subset].dtype))
if isinstance(self.data[subset], scipy.sparse.spmatrix):
val = val + '\tdensity: %f\n' % \
(float(len(self.data[subset].data)) /
self.data[subset].shape[0] /
self.data[subset].shape[1])
val = val + 'feat_type:\t' + str(self.feat_type) + '\n'
return val
| true | true |
f7140f0751f48afe56cd8ba43fa9cea43551623a | 1,042 | py | Python | app/core/migrations/0004_recipe.py | devmahmud/recipe-app-api | 8cf5cef38e25b6a43b013d4f65b0b8115e27ef10 | [
"MIT"
] | 1 | 2020-04-05T12:55:29.000Z | 2020-04-05T12:55:29.000Z | app/core/migrations/0004_recipe.py | devmahmud/recipe-app-api | 8cf5cef38e25b6a43b013d4f65b0b8115e27ef10 | [
"MIT"
] | null | null | null | app/core/migrations/0004_recipe.py | devmahmud/recipe-app-api | 8cf5cef38e25b6a43b013d4f65b0b8115e27ef10 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-04-11 04:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Recipe model with M2M links to
    Ingredient and Tag and a cascading FK to the configured user model.

    NOTE: applied migrations should not be edited beyond comments.
    """

    dependencies = [
        ('core', '0003_ingredient'),
    ]
    operations = [
        migrations.CreateModel(
            name='Recipe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('time_minutes', models.IntegerField()),
                ('price', models.DecimalField(decimal_places=2, max_digits=5)),
                ('link', models.CharField(blank=True, max_length=255)),
                ('ingredients', models.ManyToManyField(to='core.Ingredient')),
                ('tags', models.ManyToManyField(to='core.Tag')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 35.931034 | 118 | 0.603647 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f7140f27a25c6348d47859e93ebdb50708467af5 | 3,433 | py | Python | travel_blog/travel_blog/settings.py | kennethlove/travel_blog_livestream | a6bc74e99b4922b5ac0d3a8ed0f2d5dbebde6fd2 | [
"MIT"
] | 2 | 2016-12-17T02:56:22.000Z | 2017-07-19T05:13:59.000Z | travel_blog/travel_blog/settings.py | kennethlove/travel_blog_livestream | a6bc74e99b4922b5ac0d3a8ed0f2d5dbebde6fd2 | [
"MIT"
] | null | null | null | travel_blog/travel_blog/settings.py | kennethlove/travel_blog_livestream | a6bc74e99b4922b5ac0d3a8ed0f2d5dbebde6fd2 | [
"MIT"
] | 1 | 2018-12-19T06:34:37.000Z | 2018-12-19T06:34:37.000Z | """
Django settings for travel_blog project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded key committed to source control -- move to an
# environment variable before any deployment.
SECRET_KEY = 'd$^_aoggnuh-=s=kpxb*2qkr+%)^^0cnm8h32h@qq*&1k8*g^l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    # GeoDjango; pairs with the PostGIS database backend below.
    'django.contrib.gis',
    'blog',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'travel_blog.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'travel_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# PostGIS backend (spatial queries); no USER/PASSWORD -- relies on local
# trust authentication, development only.
DATABASES = {
    'default': {
        # 'ENGINE': 'django.db.backends.postgresql',
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'HOST': 'localhost',
        'NAME': 'travel_blog_db',
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'assets'),)
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
MEDIA_URL = '/uploads/'
| 26.206107 | 91 | 0.694436 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'd$^_aoggnuh-=s=kpxb*2qkr+%)^^0cnm8h32h@qq*&1k8*g^l'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.gis',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'travel_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'travel_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.postgresql',
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'HOST': 'localhost',
'NAME': 'travel_blog_db',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'assets'),)
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
MEDIA_URL = '/uploads/'
| true | true |
f714115dff427760c5619baec448dfd0b676ee52 | 8,961 | py | Python | privilege/tests/group.py | luodaihong/django-privilege | 1f6a6b410221aa8be109e43326b3a9d7d614287b | [
"Apache-2.0"
] | null | null | null | privilege/tests/group.py | luodaihong/django-privilege | 1f6a6b410221aa8be109e43326b3a9d7d614287b | [
"Apache-2.0"
] | null | null | null | privilege/tests/group.py | luodaihong/django-privilege | 1f6a6b410221aa8be109e43326b3a9d7d614287b | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
from django.test import Client, TestCase
from django.contrib.auth.models import Group
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.utils import simplejson
from privilege.core.config import GROUP_CACHE_KEY
class GroupTestCases(TestCase):
    """Integration tests for the privilege app's group views.

    Each view is exercised three ways: anonymous (expects a 302 redirect to
    login), logged in as a non-superuser (expects 403), and as the fixture
    superuser 'super' (expects the real behavior).  Tests that mutate groups
    reset the group cache (GROUP_CACHE_KEY) afterwards.
    """
    # Fixture provides users 'super' (superuser) and 'test', plus group 1.
    fixtures = ['privilege.json']

    def setUp(self):
        TestCase.setUp(self)
        self.client = Client()

    def tearDown(self):
        self.client.logout()
        TestCase.tearDown(self)

    # --- group list -------------------------------------------------------
    def test_group_list_not_login(self):
        group_list_url = reverse("privilege.views.group.group_list", args=(1, ))
        self.check_not_login(group_list_url)

    def test_group_list_logined_but_not_superuser(self):
        group_list_url = reverse("privilege.views.group.group_list", args=(1, ))
        self.check_not_superuser(group_list_url)

    def test_group_list_ok(self):
        group_list_url = reverse("privilege.views.group.group_list", args=(1, ))
        self.client.login(username="super", password="test")
        response = self.client.get(group_list_url)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context["page"].object_list)

    # --- group detail -----------------------------------------------------
    def test_group_detail_not_login(self):
        group_detail_url = reverse("privilege.views.group.group_detail", args=(1, 1,))
        self.check_not_login(group_detail_url)

    def test_get_group_detail_logined_but_not_superuser(self):
        group_detail_url = reverse("privilege.views.group.group_detail", args=(1, 1,))
        self.check_not_superuser(group_detail_url)

    def test_get_group_detail_not_exist(self):
        # Group id 0 does not exist in the fixture.
        group_detail_url = reverse("privilege.views.group.group_detail", args=(0, 1,))
        self.client.login(username="super", password="test")
        response = self.client.get(group_detail_url)
        self.assertEqual(response.status_code, 404)

    def test_get_group_detail_ok(self):
        group_detail_url = reverse("privilege.views.group.group_detail", args=(1, 1,))
        self.client.login(username="super", password="test")
        response = self.client.get(group_detail_url)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context["group"])

    # --- change group permission (JSON endpoint) --------------------------
    def test_change_group_permission_not_login(self):
        change_group_url = reverse("privilege.views.group.change_group_permission")
        self.check_not_login(change_group_url)

    def test_change_group_permission_not_super_user(self):
        change_group_url = reverse("privilege.views.group.change_group_permission")
        self.check_not_superuser(change_group_url)

    def test_change_group_permission_get_method(self):
        # The endpoint only accepts POST; GET yields the failure payload.
        change_group_url = reverse("privilege.views.group.change_group_permission")
        self.client.login(username="super", password="test")
        response = self.client.get(change_group_url)
        self.assertEqual(response.status_code, 200)
        expect_content = simplejson.dumps({"status": "nok", "msg": _("Fail")})
        self.assertEqual(response.content, expect_content)

    def test_change_group_permission_not_exist(self):
        change_group_url = reverse("privilege.views.group.change_group_permission")
        post_data = {"group_id": 0}
        self.client.login(username="super", password="test")
        response = self.client.post(change_group_url, post_data)
        self.assertEqual(response.status_code, 200)
        expect_content = simplejson.dumps({"status": "nok", "msg": _("Fail")})
        self.assertEqual(response.content, expect_content)

    def test_change_group_permission_post_bad_params(self):
        change_group_url = reverse("privilege.views.group.change_group_permission")
        post_data = {"group_id": 1, "permission_id": ""}
        self.client.login(username="super", password="test")
        response = self.client.post(change_group_url, post_data)
        self.assertEqual(response.status_code, 200)
        expect_content = simplejson.dumps({"status": "nok", "msg": _("Fail")})
        self.assertEqual(response.content, expect_content)

    def test_change_group_permission_ok(self):
        change_group_url = reverse("privilege.views.group.change_group_permission")
        post_data = {"group_id": 1, "permission_id": "1", "op_code": "add"}
        self.client.login(username="super", password="test")
        response = self.client.post(change_group_url, post_data)
        self.assertEqual(response.status_code, 200)
        expect_content = simplejson.dumps({"status": "ok", "msg": _("Success")})
        self.assertEqual(response.content, expect_content)
        # Invalidate the group cache mutated by the view.
        cache.set(GROUP_CACHE_KEY, None)

    # --- add group --------------------------------------------------------
    def test_add_group_not_login(self):
        add_group_url = reverse("privilege.views.group.add_group")
        self.check_not_login(add_group_url)

    def test_add_group_not_superuser(self):
        add_group_url = reverse("privilege.views.group.add_group")
        self.check_not_superuser(add_group_url)

    def test_add_group_not_post(self):
        add_group_url = reverse("privilege.views.group.add_group")
        self.client.login(username="super", password="test")
        response = self.client.get(add_group_url)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context["form"])

    def test_add_group_post_blank(self):
        add_group_url = reverse("privilege.views.group.add_group")
        self.client.login(username="super", password="test")
        response = self.client.post(add_group_url, {"name": ""})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context["form"].errors)

    def test_add_group_ok(self):
        add_group_url = reverse("privilege.views.group.add_group")
        self.client.login(username="super", password="test")
        response = self.client.post(add_group_url, {"name": "add_success"})
        self.assertEqual(response.status_code, 302)
        self.assertTrue(Group.objects.filter(name="add_success").count())
        # Clean up the group created by the view.
        Group.objects.filter(name="add_success").delete()
        cache.set(GROUP_CACHE_KEY, None)

    # --- edit group -------------------------------------------------------
    # NOTE(review): the "test_test_edit_*" names below look like a
    # copy/paste doubling of the "test_" prefix.
    def test_edit_group_not_login(self):
        edit_group_url = reverse("privilege.views.group.edit_group", args=(1, ))
        self.check_not_login(edit_group_url)

    def test_edit_group_not_superuser(self):
        edit_group_url = reverse("privilege.views.group.edit_group", args=(1, ))
        self.check_not_superuser(edit_group_url)

    def test_test_edit_group_not_exist(self):
        edit_group_url = reverse("privilege.views.group.edit_group", args=(0, ))
        self.client.login(username="super", password="test")
        response = self.client.get(edit_group_url)
        self.assertEqual(response.status_code, 404)

    def test_test_edit_group_not_post(self):
        edit_group_url = reverse("privilege.views.group.edit_group", args=(1, ))
        self.client.login(username="super", password="test")
        response = self.client.get(edit_group_url)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context["form"])

    def test_test_edit_group_post_blank(self):
        edit_group_url = reverse("privilege.views.group.edit_group", args=(1, ))
        self.client.login(username="super", password="test")
        response = self.client.post(edit_group_url, {"name": ""})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context["form"].errors)

    def test_test_edit_group_ok(self):
        group = Group.objects.create(name="to_delete")
        edit_group_url = reverse("privilege.views.group.edit_group", args=(group.id, ))
        self.client.login(username="super", password="test")
        response = self.client.post(edit_group_url, {"name": "changed"})
        self.assertEqual(response.status_code, 302)
        group = Group.objects.get(id=group.id)
        self.assertEqual(group.name, "changed")
        group.delete()
        cache.set(GROUP_CACHE_KEY, None)

    # --- delete group (note the "grooup" typo in the method names) --------
    def test_delete_grooup_not_login(self):
        delete_group_url = reverse("privilege.views.group.delete_group", args=(1, ))
        self.check_not_login(delete_group_url)

    def test_delete_grooup_not_superuser(self):
        delete_group_url = reverse("privilege.views.group.delete_group", args=(1, ))
        self.check_not_superuser(delete_group_url)

    def test_delete_grooup_ok(self):
        # NOTE(review): no login is performed here, so the asserted 302 is
        # just the login redirect -- this test passes without ever reaching
        # the delete view.  It should log in as 'super' and assert the
        # view's real response; confirm what the view returns for a
        # nonexistent group id (0) before changing it.
        delete_group_url = reverse("privilege.views.group.delete_group", args=(0, ))
        response = self.client.post(delete_group_url)
        self.assertEqual(response.status_code, 302)
        cache.set(GROUP_CACHE_KEY, None)

    # --- shared assertions ------------------------------------------------
    def check_not_login(self, url):
        """Assert that an anonymous GET is redirected (302) to login."""
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)

    def check_not_superuser(self, url):
        """Assert that a non-superuser ('test') GET is forbidden (403)."""
        self.client.login(username="test", password="test")
        response = self.client.get(url)
        self.assertEqual(response.status_code, 403)
| 44.142857 | 87 | 0.703828 |
from django.test import Client, TestCase
from django.contrib.auth.models import Group
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.utils import simplejson
from privilege.core.config import GROUP_CACHE_KEY
class GroupTestCases(TestCase):
fixtures = ['privilege.json']
def setUp(self):
TestCase.setUp(self)
self.client = Client()
def tearDown(self):
self.client.logout()
TestCase.tearDown(self)
def test_group_list_not_login(self):
group_list_url = reverse("privilege.views.group.group_list", args=(1, ))
self.check_not_login(group_list_url)
def test_group_list_logined_but_not_superuser(self):
group_list_url = reverse("privilege.views.group.group_list", args=(1, ))
self.check_not_superuser(group_list_url)
def test_group_list_ok(self):
group_list_url = reverse("privilege.views.group.group_list", args=(1, ))
self.client.login(username="super", password="test")
response = self.client.get(group_list_url)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context["page"].object_list)
def test_group_detail_not_login(self):
group_detail_url = reverse("privilege.views.group.group_detail", args=(1, 1,))
self.check_not_login(group_detail_url)
def test_get_group_detail_logined_but_not_superuser(self):
group_detail_url = reverse("privilege.views.group.group_detail", args=(1, 1,))
self.check_not_superuser(group_detail_url)
def test_get_group_detail_not_exist(self):
group_detail_url = reverse("privilege.views.group.group_detail", args=(0, 1,))
self.client.login(username="super", password="test")
response = self.client.get(group_detail_url)
self.assertEqual(response.status_code, 404)
def test_get_group_detail_ok(self):
group_detail_url = reverse("privilege.views.group.group_detail", args=(1, 1,))
self.client.login(username="super", password="test")
response = self.client.get(group_detail_url)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context["group"])
def test_change_group_permission_not_login(self):
change_group_url = reverse("privilege.views.group.change_group_permission")
self.check_not_login(change_group_url)
def test_change_group_permission_not_super_user(self):
change_group_url = reverse("privilege.views.group.change_group_permission")
self.check_not_superuser(change_group_url)
def test_change_group_permission_get_method(self):
change_group_url = reverse("privilege.views.group.change_group_permission")
self.client.login(username="super", password="test")
response = self.client.get(change_group_url)
self.assertEqual(response.status_code, 200)
expect_content = simplejson.dumps({"status": "nok", "msg": _("Fail")})
self.assertEqual(response.content, expect_content)
def test_change_group_permission_not_exist(self):
change_group_url = reverse("privilege.views.group.change_group_permission")
post_data = {"group_id": 0}
self.client.login(username="super", password="test")
response = self.client.post(change_group_url, post_data)
self.assertEqual(response.status_code, 200)
expect_content = simplejson.dumps({"status": "nok", "msg": _("Fail")})
self.assertEqual(response.content, expect_content)
def test_change_group_permission_post_bad_params(self):
change_group_url = reverse("privilege.views.group.change_group_permission")
post_data = {"group_id": 1, "permission_id": ""}
self.client.login(username="super", password="test")
response = self.client.post(change_group_url, post_data)
self.assertEqual(response.status_code, 200)
expect_content = simplejson.dumps({"status": "nok", "msg": _("Fail")})
self.assertEqual(response.content, expect_content)
def test_change_group_permission_ok(self):
change_group_url = reverse("privilege.views.group.change_group_permission")
post_data = {"group_id": 1, "permission_id": "1", "op_code": "add"}
self.client.login(username="super", password="test")
response = self.client.post(change_group_url, post_data)
self.assertEqual(response.status_code, 200)
expect_content = simplejson.dumps({"status": "ok", "msg": _("Success")})
self.assertEqual(response.content, expect_content)
cache.set(GROUP_CACHE_KEY, None)
def test_add_group_not_login(self):
add_group_url = reverse("privilege.views.group.add_group")
self.check_not_login(add_group_url)
def test_add_group_not_superuser(self):
add_group_url = reverse("privilege.views.group.add_group")
self.check_not_superuser(add_group_url)
def test_add_group_not_post(self):
add_group_url = reverse("privilege.views.group.add_group")
self.client.login(username="super", password="test")
response = self.client.get(add_group_url)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context["form"])
def test_add_group_post_blank(self):
add_group_url = reverse("privilege.views.group.add_group")
self.client.login(username="super", password="test")
response = self.client.post(add_group_url, {"name": ""})
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context["form"].errors)
def test_add_group_ok(self):
add_group_url = reverse("privilege.views.group.add_group")
self.client.login(username="super", password="test")
response = self.client.post(add_group_url, {"name": "add_success"})
self.assertEqual(response.status_code, 302)
self.assertTrue(Group.objects.filter(name="add_success").count())
Group.objects.filter(name="add_success").delete()
cache.set(GROUP_CACHE_KEY, None)
def test_edit_group_not_login(self):
edit_group_url = reverse("privilege.views.group.edit_group", args=(1, ))
self.check_not_login(edit_group_url)
def test_edit_group_not_superuser(self):
edit_group_url = reverse("privilege.views.group.edit_group", args=(1, ))
self.check_not_superuser(edit_group_url)
def test_test_edit_group_not_exist(self):
edit_group_url = reverse("privilege.views.group.edit_group", args=(0, ))
self.client.login(username="super", password="test")
response = self.client.get(edit_group_url)
self.assertEqual(response.status_code, 404)
def test_test_edit_group_not_post(self):
edit_group_url = reverse("privilege.views.group.edit_group", args=(1, ))
self.client.login(username="super", password="test")
response = self.client.get(edit_group_url)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context["form"])
def test_test_edit_group_post_blank(self):
edit_group_url = reverse("privilege.views.group.edit_group", args=(1, ))
self.client.login(username="super", password="test")
response = self.client.post(edit_group_url, {"name": ""})
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context["form"].errors)
def test_test_edit_group_ok(self):
group = Group.objects.create(name="to_delete")
edit_group_url = reverse("privilege.views.group.edit_group", args=(group.id, ))
self.client.login(username="super", password="test")
response = self.client.post(edit_group_url, {"name": "changed"})
self.assertEqual(response.status_code, 302)
group = Group.objects.get(id=group.id)
self.assertEqual(group.name, "changed")
group.delete()
cache.set(GROUP_CACHE_KEY, None)
def test_delete_grooup_not_login(self):
delete_group_url = reverse("privilege.views.group.delete_group", args=(1, ))
self.check_not_login(delete_group_url)
def test_delete_grooup_not_superuser(self):
delete_group_url = reverse("privilege.views.group.delete_group", args=(1, ))
self.check_not_superuser(delete_group_url)
def test_delete_grooup_ok(self):
delete_group_url = reverse("privilege.views.group.delete_group", args=(0, ))
response = self.client.post(delete_group_url)
self.assertEqual(response.status_code, 302)
cache.set(GROUP_CACHE_KEY, None)
def check_not_login(self, url):
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def check_not_superuser(self, url):
self.client.login(username="test", password="test")
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
| true | true |
f714119de3a6993ecf4b41e82919d082c69d9996 | 2,020 | py | Python | the-complete-guide-to-bot-creation/batch_renamer/cron_scheudule.py | rulgamer03/Python-Projects | 89a2418fadce0fd4674d3f7d3fa682a9aaa4b14d | [
"Apache-2.0"
] | 1 | 2021-06-18T16:29:46.000Z | 2021-06-18T16:29:46.000Z | the-complete-guide-to-bot-creation/batch_renamer/cron_scheudule.py | rulgamer03/Python-Projects | 89a2418fadce0fd4674d3f7d3fa682a9aaa4b14d | [
"Apache-2.0"
] | null | null | null | the-complete-guide-to-bot-creation/batch_renamer/cron_scheudule.py | rulgamer03/Python-Projects | 89a2418fadce0fd4674d3f7d3fa682a9aaa4b14d | [
"Apache-2.0"
] | null | null | null | import os
import time
from pathlib import Path # from path home
import schedule
print(Path.home())  # show the resolved home dir, e.g. C:\Users\<name>
# Name of the archive folder created inside the downloads directory.
old_files_folder_name = "old_files"
print("Hello ")
def clean_up_downloads(download_folder_path=None, archive_folder_name="old_files"):
    """Archive everything in the downloads folder into ``<archive>/<YYYY_MM_DD>``.

    Creates the archive folder and a subfolder named after today's date on
    demand, then moves every item found directly in the downloads folder --
    files and directories alike -- except the archive folder itself into the
    dated subfolder.

    Args:
        download_folder_path: Folder to clean up.  Defaults to
            ``~/Downloads/Downloads`` (the original hard-coded target).
        archive_folder_name: Name of the archive folder, default ``"old_files"``.
    """
    print("Cleaning up Downloads")
    # Parameterized so the function is reusable and testable; the default
    # reproduces the original behaviour exactly.
    if download_folder_path is None:
        download_folder_path = os.path.join(Path.home(), "Downloads", "Downloads")
    download_items = os.listdir(download_folder_path)
    moved_items = 0

    # Create the archive folder on first run.
    archive_folder_path = os.path.join(download_folder_path, archive_folder_name)
    if archive_folder_name not in download_items:
        print(f"No {archive_folder_name} folder yet, creating folder")
        os.mkdir(archive_folder_path)

    # One dated subfolder per day keeps successive runs separated.
    timestamp = time.strftime("%Y_%m_%d")  # year, month and day
    datetime_folder_path = os.path.join(archive_folder_path, timestamp)
    if not os.path.exists(datetime_folder_path):
        print(f"No {datetime_folder_path} folder yet, creating folder")
        os.mkdir(datetime_folder_path)
    else:
        print(f"{timestamp} folder already exists in {archive_folder_name}")

    # Move everything except the archive folder itself (directories included).
    to_be_moved = [item for item in download_items if item != archive_folder_name]
    for item in to_be_moved:
        print(f"Moving {item} to {datetime_folder_path} folder")
        os.rename(os.path.join(download_folder_path, item),
                  os.path.join(datetime_folder_path, item))
        moved_items += 1
    print(f"Moved {moved_items} of {len(to_be_moved)} items")
# Clean up the downloads folder every Friday at 20:22 (the script is started
# manually on Friday at 20:21, one minute before the scheduled run).
schedule.every().friday.at("20:22").do(clean_up_downloads)
# Keep the process alive so the scheduler can fire; poll pending jobs in a loop.
while True:
    print("here")
    schedule.run_pending()
    # Poll once per second; could be raised to 60 * 60 * 24 for daily checks.
    time.sleep(1)
| 34.827586 | 105 | 0.719802 | import os
import time
from pathlib import Path
import schedule
print(Path.home())  # sanity check: print the user's home directory
old_files_folder_name = "old_files"  # name of the archive folder created inside Downloads
print("Hello ")
def clean_up_downloads():
    """Archive every item in ~/Downloads/Downloads into old_files/<YYYY_MM_DD>.

    Creates the ``old_files`` archive folder and a subfolder named after
    today's date on demand, then moves everything except the archive folder
    itself (files and directories alike) into the dated subfolder.
    """
    print("Cleaning up Downloads")
    download_folder_path = os.path.join(Path.home(), "Downloads", "Downloads")
    download_items = os.listdir(download_folder_path)
    moved_items = 0
    # Create the archive folder on first run.
    old_files_folder_path = os.path.join(download_folder_path, old_files_folder_name)
    if old_files_folder_name not in download_items:
        print(f"No {old_files_folder_name} folder yet, creating folder")
        os.mkdir(old_files_folder_path)
    # One dated subfolder per day keeps successive runs separated.
    timestamp = time.strftime("%Y_%m_%d")
    datetime_folder_path = os.path.join(old_files_folder_path, timestamp)
    if not os.path.exists(datetime_folder_path):
        print(f"No {datetime_folder_path} folder yet, creating folder")
        os.mkdir(datetime_folder_path)
    else:
        print(f"{timestamp} folder already exists in {old_files_folder_name}")
    # Everything except the archive folder is moved (directories included).
    to_be_moved = [item for item in download_items if item != old_files_folder_name]
    for item in to_be_moved:
        print(f"Moving {item} to {datetime_folder_path} folder")
        old_path = os.path.join(download_folder_path, item)
        new_path = os.path.join(datetime_folder_path, item)
        os.rename(old_path, new_path)
        moved_items += 1
    print(f"Moved {moved_items} of {len(to_be_moved)} items")
# Run the cleanup every Friday at 20:22; poll the scheduler once per second.
schedule.every().friday.at("20:22").do(clean_up_downloads)
while True:
    print("here")
    schedule.run_pending()
    time.sleep(1)
| true | true |
f71411da7bac62abcff4bebab8f8c05a980019ae | 34,992 | py | Python | tests/test_types.py | gertjanvanzwieten/nutils | ec04d66e4797398496453181f96b14ad2edae228 | [
"MIT"
] | null | null | null | tests/test_types.py | gertjanvanzwieten/nutils | ec04d66e4797398496453181f96b14ad2edae228 | [
"MIT"
] | null | null | null | tests/test_types.py | gertjanvanzwieten/nutils | ec04d66e4797398496453181f96b14ad2edae228 | [
"MIT"
] | null | null | null | from nutils.testing import *
import nutils.types
import inspect, pickle, itertools, ctypes, stringly, tempfile, io, os
import numpy
class apply_annotations(TestCase):
  '''Tests for the :func:`nutils.types.apply_annotations` decorator, which
  applies parameter annotations as conversion functions to incoming
  arguments before the wrapped function is called.'''

  def test_without_annotations(self):
    # Without annotations arguments must pass through unchanged.
    @nutils.types.apply_annotations
    def f(a, b):
      return a, b
    a, b = f(1, 2)
    self.assertEqual(a, 1)
    self.assertEqual(b, 2)

  def test_pos_or_kw(self):
    # Annotated parameters are converted; unannotated ones are left as-is.
    @nutils.types.apply_annotations
    def f(a:int, b, c:str):
      return a, b, c
    a, b, c = f(1, 2, 3)
    self.assertEqual(a, 1)
    self.assertEqual(b, 2)
    self.assertEqual(c, '3')

  def test_with_signature(self):
    # Annotations can also be supplied via an explicitly assigned __signature__.
    def f(a):
      return a
    f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str)])
    f = nutils.types.apply_annotations(f)
    self.assertEqual(f(1), '1')

  def test_posonly(self):
    # Positional-only parameters (constructible here only through __signature__).
    def f(a):
      return a
    f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_ONLY, annotation=str)])
    f = nutils.types.apply_annotations(f)
    self.assertEqual(f(1), '1')

  def test_kwonly(self):
    @nutils.types.apply_annotations
    def f(a:str, *, b:int, c:bool):
      return a, b, c
    self.assertEqual(f(1, b='2', c=3), ('1', 2, True))

  def test_varpos(self):
    # Unannotated *args are passed through untouched.
    @nutils.types.apply_annotations
    def f(a:str, *args):
      return a, args
    self.assertEqual(f(1, 2, 3), ('1', (2, 3)))

  def test_varpos_annotated(self):
    # An annotation on *args converts the whole tuple of varargs.
    map_str = lambda args: map(str, args)
    @nutils.types.apply_annotations
    def f(a:str, *args:map_str):
      return a, args
    self.assertEqual(f(1, 2, 3), ('1', ('2', '3')))

  def test_varkw(self):
    # Unannotated **kwargs are passed through untouched.
    @nutils.types.apply_annotations
    def f(a:str, **kwargs):
      return a, kwargs
    self.assertEqual(f(1, b=2, c=3), ('1', dict(b=2, c=3)))

  def test_varkw_annotated(self):
    # An annotation on **kwargs converts the whole keyword dictionary.
    map_str = lambda kwargs: {k: str(v) for k, v in kwargs.items()}
    @nutils.types.apply_annotations
    def f(a:str, **kwargs:map_str):
      return a, kwargs
    self.assertEqual(f(1, b=2, c=3), ('1', dict(b='2', c='3')))

  def test_posonly_varkw(self):
    # Mixing a positional-only parameter with **kwargs: `b` remains addressable
    # both positionally and by keyword, with its default applied when omitted.
    def f(a, b, **c):
      return a, b, c
    f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_ONLY, annotation=str),
                                         inspect.Parameter('b', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str, default=None),
                                         inspect.Parameter('c', inspect.Parameter.VAR_KEYWORD)])
    f = nutils.types.apply_annotations(f)
    self.assertEqual(f(1, c=2, d=3), ('1', None, dict(c=2, d=3)))
    self.assertEqual(f(1, None, c=2, d=3), ('1', None, dict(c=2, d=3)))
    self.assertEqual(f(1, b=None, c=2, d=3), ('1', None, dict(c=2, d=3)))
    self.assertEqual(f(1, b=4, c=2, d=3), ('1', '4', dict(c=2, d=3)))

  def test_default_none(self):
    # A value of None is passed through without conversion.
    @nutils.types.apply_annotations
    def f(a:str=None):
      return a
    self.assertEqual(f(), None)
    self.assertEqual(f(None), None)
    self.assertEqual(f(1), '1')
class nutils_hash(TestCase):
  '''Tests for :func:`nutils.types.nutils_hash`, which computes a stable
  20-byte hash for supported objects.  The literal hex digests below pin the
  hash values so that unintended changes to the hash scheme are detected.'''

  class custom:
    # Object supplying its own precomputed hash via the __nutils_hash__ protocol.
    @property
    def __nutils_hash__(self):
      return b'01234567890123456789'

    def f(self):
      pass

  def test_ellipsis(self):
    self.assertEqual(nutils.types.nutils_hash(...).hex(), '0c8bce06e451e4d5c49f60da0abf2ccbadf80600')

  def test_None(self):
    self.assertEqual(nutils.types.nutils_hash(None).hex(), 'bdfcbd663476b2db5b2b2e59a6d93882a908dc76')

  def test_bool(self):
    self.assertEqual(nutils.types.nutils_hash(False).hex(), '04a5e8f73dcea55dcd7482a476cf2e7b53d6dc50')
    self.assertEqual(nutils.types.nutils_hash(True).hex(), '3fe990437e1624c831729f2866979254437bb7e9')

  def test_int(self):
    self.assertEqual(nutils.types.nutils_hash(1).hex(), '00ec7dea895ebd921e56bbc554688d8b3a1e4dfc')
    self.assertEqual(nutils.types.nutils_hash(2).hex(), '8ae88fa39407cf75e46f9e0aba8c971de2256b14')

  def test_float(self):
    self.assertEqual(nutils.types.nutils_hash(1.).hex(), 'def4bae4f2a3e29f6ddac537d3fa7c72195e5d8b')
    self.assertEqual(nutils.types.nutils_hash(2.5).hex(), '5216c2bf3c16d8b8ff4d9b79f482e5cea0a4cb95')

  def test_complex(self):
    self.assertEqual(nutils.types.nutils_hash(1+0j).hex(), 'cf7a0d933b7bb8d3ca252683b137534a1ecae073')
    self.assertEqual(nutils.types.nutils_hash(2+1j).hex(), 'ee088890528f941a80aa842dad36591b05253e55')

  def test_inequality_numbers(self):
    # Numerically equal values of different types must hash differently.
    self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(1.).hex())
    self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(1+0j).hex())
    self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(True).hex())

  def test_str(self):
    self.assertEqual(nutils.types.nutils_hash('spam').hex(), '3ca1023ab75a68dc7b0f83b43ec624704a7aef61')
    self.assertEqual(nutils.types.nutils_hash('eggs').hex(), '124b0a7b3984e08125c380f7454896c1cad22e2c')

  def test_bytes(self):
    self.assertEqual(nutils.types.nutils_hash(b'spam').hex(), '5e717ec15aace7c25610c1dea340f2173f2df014')
    self.assertEqual(nutils.types.nutils_hash(b'eggs').hex(), '98f2061978497751cac94f982fd96d9b015b74c3')

  def test_tuple(self):
    self.assertEqual(nutils.types.nutils_hash(()).hex(), '15d44755bf0731b2a3e9a5c5c8e0807b61881a1f')
    self.assertEqual(nutils.types.nutils_hash((1,)).hex(), '328b16ebbc1815cf579ae038a35c4d68ebb022af')
    # Item order matters for tuples.
    self.assertNotEqual(nutils.types.nutils_hash((1,'spam')).hex(), nutils.types.nutils_hash(('spam',1)).hex())

  def test_frozenset(self):
    self.assertEqual(nutils.types.nutils_hash(frozenset([1,2])).hex(), '3862dc7e5321bc8a576c385ed2c12c71b96a375a')
    self.assertEqual(nutils.types.nutils_hash(frozenset(['spam','eggs'])).hex(), '2c75fd3db57f5e505e1425ae9ff6dcbbc77fd123')

  @unittest.skipIf(sys.version_info < (3,7), "not supported in this Python version")
  def test_dataclass(self):
    import dataclasses
    A = dataclasses.make_dataclass('A', [('n', int), ('f', float)])
    self.assertEqual(nutils.types.nutils_hash(A(n=1, f=2.5)).hex(), 'daf4235240e897beb9586db3c91663b24e229c52')

  def test_type_bool(self):
    self.assertEqual(nutils.types.nutils_hash(bool).hex(), 'feb912889d52d45fcd1e778c427b093a19a1ea78')

  def test_type_int(self):
    self.assertEqual(nutils.types.nutils_hash(int).hex(), 'aa8cb9975f7161b1f7ceb88b4b8585b49946b31e')

  def test_type_float(self):
    self.assertEqual(nutils.types.nutils_hash(float).hex(), '6d5079a53075f4b6f7710377838d8183730f1388')

  def test_type_complex(self):
    self.assertEqual(nutils.types.nutils_hash(complex).hex(), '6b00f6b9c6522742fd3f8054af6f10a24a671fff')

  def test_type_str(self):
    self.assertEqual(nutils.types.nutils_hash(str).hex(), '2349e11586163208d2581fe736630f4e4b680a7b')

  def test_type_bytes(self):
    self.assertEqual(nutils.types.nutils_hash(bytes).hex(), 'b0826ca666a48739e6f8b968d191adcefaa39670')

  def test_type_tuple(self):
    self.assertEqual(nutils.types.nutils_hash(tuple).hex(), '07cb4a24ca8ac53c820f20721432b4726e2ad1af')

  def test_type_frozenset(self):
    self.assertEqual(nutils.types.nutils_hash(frozenset).hex(), '48dc7cd0fbd54924498deb7c68dd363b4049f5e2')

  def test_type_bufferedreader(self):
    # Hashing a read-only file must leave the read position untouched
    # (asserted via tell()); files opened read-write raise TypeError.
    try:
      fid, path = tempfile.mkstemp()
      os.write(fid, b'test')
      os.close(fid)
      with open(path, 'rb') as f:
        f.seek(2)
        self.assertEqual(nutils.types.nutils_hash(f).hex(), '4edef1af3aa845b9e8bbde2d8265be5f30be4c2a')
        self.assertEqual(f.tell(), 2)
      with open(path, 'rb+') as f, self.assertRaises(TypeError):
        nutils.types.nutils_hash(f).hex()
    finally:
      os.unlink(path)

  def test_type_boundmethod(self):
    self.assertEqual(nutils.types.nutils_hash(self.custom().f).hex(), 'ebf7084bb2504922235ab035a9197b9cb4cf47af')

  def test_custom(self):
    # Objects defining __nutils_hash__ have that value returned verbatim.
    self.assertEqual(nutils.types.nutils_hash(self.custom()).hex(), b'01234567890123456789'.hex())

  def test_unhashable(self):
    with self.assertRaises(TypeError):
      nutils.types.nutils_hash([])
class CacheMeta(TestCase):
  '''Tests for the :class:`nutils.types.CacheMeta` metaclass.  Attributes
  named in a class' ``__cache__`` tuple are cached: properties and
  argument-less methods are computed once, methods with arguments remember
  only the most recent call.  The nonlocal ``ncalls`` counters track how
  often the cached attribute is actually evaluated.'''

  def test_property(self):
    # Property caching must work both with and without __slots__.
    for withslots in False, True:
      with self.subTest(withslots=withslots):

        class T(metaclass=nutils.types.CacheMeta):
          if withslots:
            __slots__ = ()
          __cache__ = 'x',
          @property
          def x(self):
            nonlocal ncalls
            ncalls += 1
            return 1

        ncalls = 0
        t = T()
        self.assertEqual(ncalls, 0)
        self.assertEqual(t.x, 1)
        self.assertEqual(ncalls, 1)
        # Second access is served from the cache.
        self.assertEqual(t.x, 1)
        self.assertEqual(ncalls, 1)

  def test_set_property(self):
    # Cached properties remain read-only.
    class T(metaclass=nutils.types.CacheMeta):
      __cache__ = 'x',
      @property
      def x(self):
        return 1
    t = T()
    with self.assertRaises(AttributeError):
      t.x = 1

  def test_del_property(self):
    # Cached properties cannot be deleted either.
    class T(metaclass=nutils.types.CacheMeta):
      __cache__ = 'x',
      @property
      def x(self):
        return 1
    t = T()
    with self.assertRaises(AttributeError):
      del t.x

  def test_method_without_args(self):
    # An argument-less method behaves like a cached property.
    for withslots in False, True:
      with self.subTest(withslots=withslots):

        class T(metaclass=nutils.types.CacheMeta):
          if withslots:
            __slots__ = ()
          __cache__ = 'x',
          def x(self):
            nonlocal ncalls
            ncalls += 1
            return 1

        ncalls = 0
        t = T()
        self.assertEqual(ncalls, 0)
        self.assertEqual(t.x(), 1)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x(), 1)
        self.assertEqual(ncalls, 1)

  def test_method_with_args(self):
    # Only the most recent (arguments, result) pair is cached: repeating
    # earlier arguments triggers recomputation (ncalls goes to 3 below).
    for withslots in False, True:
      with self.subTest(withslots=withslots):

        class T(metaclass=nutils.types.CacheMeta):
          if withslots:
            __slots__ = ()
          __cache__ = 'x',
          def x(self, a, b):
            nonlocal ncalls
            ncalls += 1
            return a + b

        ncalls = 0
        t = T()
        self.assertEqual(ncalls, 0)
        self.assertEqual(t.x(1, 2), 3)
        self.assertEqual(ncalls, 1)
        # Same arguments passed by keyword hit the cache.
        self.assertEqual(t.x(a=1, b=2), 3)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x(2, 2), 4)
        self.assertEqual(ncalls, 2)
        self.assertEqual(t.x(a=2, b=2), 4)
        self.assertEqual(ncalls, 2)
        self.assertEqual(t.x(1, 2), 3)
        self.assertEqual(ncalls, 3)

  def test_method_with_args_and_preprocessors(self):
    # Arguments are normalized by apply_annotations before the cache lookup,
    # so '1' and 1 count as the same argument.
    for withslots in False, True:
      with self.subTest(withslots=withslots):

        class T(metaclass=nutils.types.CacheMeta):
          if withslots:
            __slots__ = ()
          __cache__ = 'x',
          @nutils.types.apply_annotations
          def x(self, a:int, b:int):
            nonlocal ncalls
            ncalls += 1
            return a + b

        ncalls = 0
        t = T()
        self.assertEqual(ncalls, 0)
        self.assertEqual(t.x(1, 2), 3)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x(a='1', b='2'), 3)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x('2', '2'), 4)
        self.assertEqual(ncalls, 2)
        self.assertEqual(t.x(a=2, b=2), 4)
        self.assertEqual(ncalls, 2)
        self.assertEqual(t.x('1', 2), 3)
        self.assertEqual(ncalls, 3)

  def test_method_with_kwargs(self):
    # Caching also covers methods taking **kwargs.
    for withslots in False, True:
      with self.subTest(withslots=withslots):

        class T(metaclass=nutils.types.CacheMeta):
          if withslots:
            __slots__ = ()
          __cache__ = 'x',
          def x(self, a, **kwargs):
            nonlocal ncalls
            ncalls += 1
            return a + sum(kwargs.values())

        ncalls = 0
        t = T()
        self.assertEqual(ncalls, 0)
        self.assertEqual(t.x(1, b=2), 3)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x(a=1, b=2), 3)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.x(1, b=2, c=3), 6)
        self.assertEqual(ncalls, 2)
        self.assertEqual(t.x(a=1, b=2, c=3), 6)
        self.assertEqual(ncalls, 2)

  def test_subclass_redefined_property(self):
    # A subclass may redefine a cached property; super().x must still see the
    # base class value, regardless of which attribute is accessed first.
    class T(metaclass=nutils.types.CacheMeta):
      __cache__ = 'x',
      @property
      def x(self):
        return 1

    class U(T):
      __cache__ = 'x',
      @property
      def x(self):
        return super().x + 1
      @property
      def y(self):
        # Note: `y` is not cached itself; it reads the base class `x`.
        return super().x

    u1 = U()
    self.assertEqual(u1.x, 2)
    self.assertEqual(u1.y, 1)

    # Access order reversed: `y` first, then `x`.
    u2 = U()
    self.assertEqual(u2.y, 1)
    self.assertEqual(u2.x, 2)

  def test_missing_attribute(self):
    # Listing a nonexistent attribute in __cache__ fails at class creation.
    with self.assertRaisesRegex(TypeError, 'Attribute listed in __cache__ is undefined: x'):
      class T(metaclass=nutils.types.CacheMeta):
        __cache__ = 'x',

  def test_invalid_attribute(self):
    # Only properties and methods can be cached; plain values are rejected.
    with self.assertRaisesRegex(TypeError, "Don't know how to cache attribute x: None"):
      class T(metaclass=nutils.types.CacheMeta):
        __cache__ = 'x',
        x = None

  def test_name_mangling(self):
    # Private (name-mangled) attributes can be cached as well.
    for withslots in False, True:
      with self.subTest(withslots=withslots):

        class T(metaclass=nutils.types.CacheMeta):
          if withslots:
            __slots__ = ()
          __cache__ = '__x',
          @property
          def __x(self):
            nonlocal ncalls
            ncalls += 1
            return 1
          @property
          def y(self):
            return self.__x

        ncalls = 0
        t = T()
        self.assertEqual(ncalls, 0)
        self.assertEqual(t.y, 1)
        self.assertEqual(ncalls, 1)
        self.assertEqual(t.y, 1)
        self.assertEqual(ncalls, 1)
class strictint(TestCase):
  '''Tests for :func:`nutils.types.strictint`: exact integer types are
  normalized to the builtin ``int``; lossy or non-numeric input raises
  :class:`ValueError`.'''

  def test_int(self):
    result = nutils.types.strictint(1)
    self.assertEqual(result, 1)
    self.assertEqual(type(result), int)

  def test_numpy_int(self):
    result = nutils.types.strictint(numpy.int64(1))
    self.assertEqual(result, 1)
    self.assertEqual(type(result), int)

  def test_float(self):
    self.assertRaises(ValueError, nutils.types.strictint, 1.)

  def test_numpy_float(self):
    self.assertRaises(ValueError, nutils.types.strictint, numpy.float64(1.))

  def test_complex(self):
    self.assertRaises(ValueError, nutils.types.strictint, 1+0j)

  def test_str(self):
    self.assertRaises(ValueError, nutils.types.strictint, '1')
class strictfloat(TestCase):
  '''Tests for :func:`nutils.types.strictfloat`: integer and float types are
  normalized to the builtin ``float``; lossy conversions (complex) and
  strings raise :class:`ValueError`.'''

  def test_int(self):
    value = nutils.types.strictfloat(1)
    self.assertEqual(value, 1.)
    self.assertEqual(type(value), float)

  def test_numpy_int(self):
    value = nutils.types.strictfloat(numpy.int64(1))
    self.assertEqual(value, 1.)
    self.assertEqual(type(value), float)

  def test_float(self):
    value = nutils.types.strictfloat(1.)
    self.assertEqual(value, 1.)
    self.assertEqual(type(value), float)

  def test_numpy_float(self):
    value = nutils.types.strictfloat(numpy.float64(1.))
    self.assertEqual(value, 1.)
    self.assertEqual(type(value), float)

  def test_complex(self):
    # Fixed copy-paste bug: this previously called strictint, so strictfloat's
    # rejection of complex values was never actually tested.
    with self.assertRaises(ValueError):
      nutils.types.strictfloat(1+0j)

  def test_str(self):
    with self.assertRaises(ValueError):
      nutils.types.strictfloat('1.')
class strictstr(TestCase):
  '''Tests for :func:`nutils.types.strictstr`: genuine strings pass through
  unchanged; other types raise :class:`ValueError`.'''

  def test_str(self):
    result = nutils.types.strictstr('spam')
    self.assertEqual(result, 'spam')
    self.assertEqual(type(result), str)

  def test_int(self):
    self.assertRaises(ValueError, nutils.types.strictstr, 1)
class strict(TestCase):
  '''Tests for :data:`nutils.types.strict`: subscripting with a type yields a
  validator that passes matching values through and raises ValueError
  otherwise; the bare object is not callable.'''

  def test_valid(self):
    self.assertEqual(nutils.types.strict[int](1), 1)

  def test_invalid(self):
    self.assertRaises(ValueError, nutils.types.strict[int], '1')

  def test_call(self):
    self.assertRaises(TypeError, nutils.types.strict)
class tupletype(TestCase):
  '''Tests for :data:`nutils.types.tuple`: subscripting with an item
  constructor yields a converter that builds a tuple, applying the
  constructor to every item.'''

  def test_valid1(self):
    # Empty input yields an empty tuple.
    result = nutils.types.tuple[nutils.types.strictint]([])
    self.assertEqual(result, ())
    self.assertEqual(type(result), tuple)

  def test_valid2(self):
    result = nutils.types.tuple[nutils.types.strictint]([1,2,3])
    self.assertEqual(result, (1,2,3))
    self.assertEqual(type(result), tuple)

  def test_invalid(self):
    # Any item rejected by the item constructor fails the whole conversion.
    self.assertRaises(ValueError, nutils.types.tuple[nutils.types.strictint], [1, 'spam','eggs'])

  def test_without_item_constructor(self):
    # Unparametrized, nutils.types.tuple behaves like the builtin tuple.
    items = 1,2,3
    self.assertEqual(nutils.types.tuple(items), tuple(items))

  def test_name(self):
    self.assertEqual(nutils.types.tuple[nutils.types.strictint].__name__, 'tuple[nutils.types.strictint]')
class frozendict(TestCase):
  '''Tests for :class:`nutils.types.frozendict`, an immutable, hashable
  mapping supporting construction from mappings, views and iterables of
  key-value pairs, optional per-item conversion via class subscription,
  and deduplication of equal instances.'''

  def test_constructor(self):
    # All construction paths must yield an equal frozendict.
    src = {'spam': 1, 'eggs': 2.3}
    for name, value in [('mapping', src), ('mapping_view', src.items()), ('iterable', (item for item in src.items())), ('frozendict', nutils.types.frozendict(src))]:
      with self.subTest(name):
        frozen = nutils.types.frozendict(value)
        self.assertIsInstance(frozen, nutils.types.frozendict)
        self.assertEqual(dict(frozen), src)

  def test_constructor_invalid(self):
    with self.assertRaises(ValueError):
      nutils.types.frozendict(['spam', 'eggs', 1])

  def test_clsgetitem(self):
    # frozendict[K, V] converts keys and values on construction.
    T = nutils.types.frozendict[str, float]
    src = {1: 2, 'spam': '2.3'}
    for name, value in [('mapping', src), ('mapping_view', src.items()), ('iterable', (item for item in src.items()))]:
      with self.subTest(name):
        frozen = T(value)
        self.assertIsInstance(frozen, nutils.types.frozendict)
        self.assertEqual(dict(frozen), {'1': 2., 'spam': 2.3})

  def test_clsgetitem_invalid_types(self):
    # Subscription takes exactly two types: key and value.
    with self.assertRaises(RuntimeError):
      nutils.types.frozendict[str, float, bool]

  def test_clsgetitem_invalid_value(self):
    T = nutils.types.frozendict[str, float]
    with self.assertRaises(ValueError):
      T(1)

  def test_setitem(self):
    # Mutation is forbidden.
    frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
    with self.assertRaises(TypeError):
      frozen['eggs'] = 3

  def test_delitem(self):
    frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
    with self.assertRaises(TypeError):
      del frozen['eggs']

  def test_getitem_existing(self):
    frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
    self.assertEqual(frozen['spam'], 1)

  def test_getitem_nonexisting(self):
    frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
    with self.assertRaises(KeyError):
      frozen['foo']

  def test_contains(self):
    frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
    self.assertIn('spam', frozen)
    self.assertNotIn('foo', frozen)

  def test_iter(self):
    src = {'spam': 1, 'eggs': 2.3}
    frozen = nutils.types.frozendict(src)
    self.assertEqual(frozenset(frozen), frozenset(src))

  def test_len(self):
    src = {'spam': 1, 'eggs': 2.3}
    frozen = nutils.types.frozendict(src)
    self.assertEqual(len(frozen), len(src))

  def test_hash(self):
    # Equal contents imply equal hashes across distinct instances.
    src = {'spam': 1, 'eggs': 2.3}
    self.assertEqual(hash(nutils.types.frozendict(src)), hash(nutils.types.frozendict(src)))

  def test_copy(self):
    # copy() returns a plain, mutable dict.
    src = {'spam': 1, 'eggs': 2.3}
    copy = nutils.types.frozendict(src).copy()
    self.assertIsInstance(copy, dict)
    self.assertEqual(copy, src)

  def test_pickle(self):
    src = {'spam': 1, 'eggs': 2.3}
    frozen = pickle.loads(pickle.dumps(nutils.types.frozendict(src)))
    self.assertIsInstance(frozen, nutils.types.frozendict)
    self.assertEqual(dict(frozen), src)

  def test_eq_same_id(self):
    src = {'spam': 1, 'eggs': 2.3}
    a = nutils.types.frozendict(src)
    self.assertEqual(a, a)

  def test_eq_other_id(self):
    src = {'spam': 1, 'eggs': 2.3}
    a = nutils.types.frozendict(src)
    b = nutils.types.frozendict(src)
    self.assertEqual(a, b)

  def test_eq_deduplicated(self):
    src = {'spam': 1, 'eggs': 2.3}
    a = nutils.types.frozendict(src)
    b = nutils.types.frozendict(src)
    a == b # the first comparison deduplicates: it replaces `a.__base` with `b.__base`
    self.assertEqual(a, b)

  def test_ineq_frozendict(self):
    src = {'spam': 1, 'eggs': 2.3}
    self.assertNotEqual(nutils.types.frozendict(src), nutils.types.frozendict({'spam': 1}))

  def test_ineq_dict(self):
    # A frozendict never compares equal to a plain dict.
    src = {'spam': 1, 'eggs': 2.3}
    self.assertNotEqual(nutils.types.frozendict(src), src)

  def test_nutils_hash(self):
    frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
    self.assertEqual(nutils.types.nutils_hash(frozen).hex(), '8cf14f109e54707af9c2e66d7d3cdb755cce8243')
class frozenmultiset(TestCase):
  '''Tests for :class:`nutils.types.frozenmultiset`, an immutable multiset
  that preserves insertion order on iteration but compares and hashes
  independently of item order.'''

  def test_constructor(self):
    src = 'spam', 'bacon', 'sausage', 'spam'
    for name, value in [('tuple', src), ('frozenmultiset', nutils.types.frozenmultiset(src))]:
      with self.subTest(name=name):
        frozen = nutils.types.frozenmultiset(value)
        # Fixed: the original wrapped this assertion in a loop whose variable
        # was never used, repeating the identical check three times.
        self.assertEqual({k: tuple(frozen).count(k) for k in set(src)}, {'spam':2, 'bacon':1, 'sausage':1})

  def test_clsgetitem(self):
    # frozenmultiset[T] converts each item on construction.
    src = False, 1, numpy.int64(2)
    frozen = nutils.types.frozenmultiset[nutils.types.strictint](src)
    self.assertEqual(set(frozen), {0, 1, 2})

  def test_preserve_order(self):
    # Iteration yields items in insertion order, duplicates included.
    for src in [('spam', 'bacon', 'sausage', 'spam'), ('spam', 'egg', 'spam', 'spam', 'bacon', 'spam')]:
      with self.subTest(src=src):
        self.assertEqual(tuple(nutils.types.frozenmultiset(src)), src)

  def test_and(self):
    # Intersection respects multiplicities and is symmetric.
    for l, r, lar in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], ['spam', 'eggs']],
                      [['spam'], ['eggs'], []],
                      [['spam','spam']]*3]:
      with self.subTest(l=l, r=r, lar=lar):
        self.assertEqual(nutils.types.frozenmultiset(l)&nutils.types.frozenmultiset(r), nutils.types.frozenmultiset(lar))
      with self.subTest(l=r, r=l, lar=lar):
        self.assertEqual(nutils.types.frozenmultiset(r)&nutils.types.frozenmultiset(l), nutils.types.frozenmultiset(lar))

  def test_sub(self):
    # Difference respects multiplicities; both orientations are checked.
    for l, r, lmr, rml in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], [], ['spam']],
                           [['spam'], ['eggs'], ['spam'], ['eggs']],
                           [['spam'], ['spam'], [], []]]:
      with self.subTest(l=l, r=r, lmr=lmr):
        self.assertEqual(nutils.types.frozenmultiset(l)-nutils.types.frozenmultiset(r), nutils.types.frozenmultiset(lmr))
      with self.subTest(l=r, r=l, lmr=rml):
        self.assertEqual(nutils.types.frozenmultiset(r)-nutils.types.frozenmultiset(l), nutils.types.frozenmultiset(rml))

  def test_pickle(self):
    src = 'spam', 'bacon', 'sausage', 'spam'
    frozen = pickle.loads(pickle.dumps(nutils.types.frozenmultiset(src)))
    self.assertIsInstance(frozen, nutils.types.frozenmultiset)
    self.assertEqual(frozen, nutils.types.frozenmultiset(src))

  def test_hash(self):
    # The hash must be invariant under permutation of the items.
    src = 'spam', 'bacon', 'sausage', 'spam'
    ref = nutils.types.frozenmultiset(src)
    for perm in itertools.permutations(src):
      with self.subTest(perm=perm):
        # Fixed: the original hashed `src` instead of `perm`, so the
        # permutation loop never tested anything.
        self.assertEqual(hash(nutils.types.frozenmultiset(perm)), hash(ref))

  def test_nutils_hash(self):
    # The nutils hash is likewise permutation-invariant, pinned by digest.
    for perm in itertools.permutations(('spam', 'bacon', 'sausage', 'spam')):
      with self.subTest(perm=perm):
        frozen = nutils.types.frozenmultiset(perm)
        self.assertEqual(nutils.types.nutils_hash(frozen).hex(), 'f3fd9c6d4741af2e67973457ee6308deddcb714c')

  def test_eq(self):
    # Equality must be invariant under permutation of the items.
    src = 'spam', 'bacon', 'sausage', 'spam'
    ref = nutils.types.frozenmultiset(src)
    for perm in itertools.permutations(src):
      with self.subTest(perm=perm):
        # Fixed: the original compared `src` instead of `perm`, so the
        # permutation loop never tested anything.
        self.assertEqual(nutils.types.frozenmultiset(perm), ref)

  def test_contains(self):
    src = 'spam', 'bacon', 'sausage', 'spam'
    frozen = nutils.types.frozenmultiset(src)
    for item in 'spam', 'bacon', 'eggs':
      with self.subTest(item=item):
        if item in src:
          self.assertIn(item, frozen)
        else:
          self.assertNotIn(item, frozen)

  def test_len(self):
    # len counts duplicates.
    src = 'spam', 'bacon', 'sausage', 'spam'
    frozen = nutils.types.frozenmultiset(src)
    self.assertEqual(len(frozen), len(src))

  def test_nonzero(self):
    self.assertTrue(nutils.types.frozenmultiset(['spam', 'eggs']))
    self.assertFalse(nutils.types.frozenmultiset([]))

  def test_add(self):
    # Addition concatenates multisets, keeping all multiplicities.
    l = nutils.types.frozenmultiset(['spam', 'bacon'])
    r = nutils.types.frozenmultiset(['sausage', 'spam'])
    lpr = nutils.types.frozenmultiset(['spam', 'bacon', 'sausage', 'spam'])
    self.assertEqual(l+r, lpr)

  def test_isdisjoint(self):
    for l, r, disjoint in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], False],
                           [['spam'], ['eggs'], True],
                           [['spam'], ['spam'], False]]:
      with self.subTest(l=l, r=r, disjoint=disjoint):
        self.assertEqual(nutils.types.frozenmultiset(l).isdisjoint(nutils.types.frozenmultiset(r)), disjoint)
class frozenarray(TestCase):
  '''Tests for :class:`nutils.types.frozenarray`, an immutable, hashable
  wrapper of a numpy array with strict dtype coercion rules: silent upcasts
  are allowed, while downcasts are either forbidden (strict dtypes) or
  follow numpy semantics (builtin dtypes).'''

  def _test_constructor(self, src, frozen_dtype, src_types=(list,numpy.array,nutils.types.frozenarray)):
    # Helper: construction must succeed for every source type, with and without copy.
    src = list(src)
    for copy in True, False:
      for src_type in src_types:
        with self.subTest(copy=copy, src_type=src_type):
          frozen = nutils.types.frozenarray(src_type(src), copy=copy, dtype=frozen_dtype)
          self.assertIsInstance(frozen, nutils.types.frozenarray)
          self.assertEqual(frozen.tolist(), src)

  def _test_constructor_raises(self, src, frozen_dtype, exc_type, exc_regex):
    # Helper: construction must fail for every source type, with and without copy.
    src = list(src)
    for copy in True, False:
      for src_type in list, numpy.array, nutils.types.frozenarray:
        with self.subTest(copy=copy, src_type=src_type), self.assertRaisesRegex(exc_type, exc_regex):
          nutils.types.frozenarray(src_type(src), copy=copy, dtype=frozen_dtype)

  def test_constructor_bool(self):
    self._test_constructor((False, True), bool)

  def test_constructor_bool_emptyarray(self):
    self._test_constructor((), bool, src_types=[list])

  def test_constructor_int(self):
    self._test_constructor((0,1), int)

  def test_constructor_int_upcast(self):
    self._test_constructor((False,True), int)

  def test_constructor_int_downcast(self):
    # Builtin `int` dtype follows numpy semantics: float->int downcast is allowed.
    self._test_constructor((0.,1.), int)

  def test_constructor_int_emptyarray(self):
    self._test_constructor((), int, src_types=[list])

  def test_constructor_float(self):
    self._test_constructor((0.,1.), float)

  def test_constructor_float_upcast(self):
    self._test_constructor((0,1), float)

  def test_constructor_float_downcast(self):
    # complex->float: TypeError for list input, ComplexWarning for array input.
    src = [0.+0j,1.+0j]
    for copy in True, False:
      with self.subTest(copy=copy, src_type=list), self.assertRaises(TypeError):
        nutils.types.frozenarray(src, copy=copy, dtype=float)
      for src_type in numpy.array, nutils.types.frozenarray:
        with self.subTest(copy=copy, src_type=src_type), self.assertWarns(numpy.ComplexWarning):
          nutils.types.frozenarray(src_type(src), copy=copy, dtype=float)

  def test_constructor_complex(self):
    self._test_constructor((0+0j,1+1j), complex)

  def test_constructor_strictint(self):
    self._test_constructor((0,1), nutils.types.strictint)

  def test_constructor_strictint_upcast(self):
    self._test_constructor((False,True), nutils.types.strictint)

  def test_constructor_strictint_downcast(self):
    # Strict dtypes forbid lossy downcasts.
    self._test_constructor_raises((0.,1.), nutils.types.strictint, ValueError, '^downcasting .* is forbidden$')

  def test_constructor_strictint_emptyarray(self):
    self._test_constructor((), nutils.types.strictint, src_types=[list])

  def test_constructor_strictfloat(self):
    self._test_constructor((0.,1.), nutils.types.strictfloat)

  def test_constructor_strictfloat_upcast(self):
    self._test_constructor((0,1), nutils.types.strictfloat)

  def test_constructor_strictfloat_downcast(self):
    self._test_constructor_raises((0.+0j,1.+0j), nutils.types.strictfloat, ValueError, '^downcasting .* is forbidden$')

  def test_constructor_invalid_dtype(self):
    self._test_constructor_raises((0,1), list, ValueError, '^unsupported dtype:')

  def test_clsgetitem(self):
    # frozenarray[dtype] converts on construction.
    src = [0.,1.]
    frozen = nutils.types.frozenarray[nutils.types.strictfloat](src)
    self.assertIsInstance(frozen, nutils.types.frozenarray)
    self.assertEqual(frozen.tolist(), src)

  def test_clsgetitem_invalid(self):
    src = [0.,1.]
    with self.assertRaises(ValueError):
      nutils.types.frozenarray[nutils.types.strictint](src)

  def test_nutils_hash(self):
    # The hash depends on the array layout: `b` differs from `a`, but its
    # transpose has identical contents and must hash identically.
    a = nutils.types.frozenarray(numpy.array([[1,2],[3,4]], numpy.int64))
    b = nutils.types.frozenarray(numpy.array([[1,3],[2,4]], numpy.int64))
    self.assertNotEqual(nutils.types.nutils_hash(a).hex(), nutils.types.nutils_hash(b).hex())
    self.assertEqual(nutils.types.nutils_hash(a).hex(), nutils.types.nutils_hash(b.T).hex())
    self.assertEqual(nutils.types.nutils_hash(a).hex(), '42cc3a5e1216c1f0a9921a61a3a2c67025c98d69')
    self.assertEqual(nutils.types.nutils_hash(b).hex(), '8f0c9f9a118c42c258f1e69e374aadda99b4be97')

  def test_pickle(self):
    src = [[1,2],[3,4]]
    value = pickle.loads(pickle.dumps(nutils.types.frozenarray(src)))
    self.assertIsInstance(value, nutils.types.frozenarray)
    self.assertEqual(value, nutils.types.frozenarray(src))

  def test_eq_same_instance(self):
    a = nutils.types.frozenarray([[1,2],[3,4]], int)
    self.assertEqual(a, a)

  def test_eq_not_frozenarray(self):
    # A frozenarray never equals a plain nested list.
    a = nutils.types.frozenarray([[1,2],[3,4]], int)
    self.assertNotEqual(a, [[1,2],[3,4]])

  def test_eq_same_base(self):
    # Two uncopied wrappers of the same base array are equal.
    base = numpy.array([[1,2],[3,4]], int)
    a = nutils.types.frozenarray(base, copy=False)
    b = nutils.types.frozenarray(base, copy=False)
    self.assertEqual(a, b)

  def test_eq_different_array(self):
    a = nutils.types.frozenarray([[1,2],[3,4]], int)
    b = nutils.types.frozenarray([[1,3],[2,4]], int)
    self.assertNotEqual(a, b)

  def test_eq_different_dtype(self):
    # Equal values with different dtypes are unequal.
    a = nutils.types.frozenarray([[1,2],[3,4]], int)
    b = nutils.types.frozenarray([[1,2],[3,4]], float)
    self.assertNotEqual(a, b)

  def test_eq_different_base(self):
    a = nutils.types.frozenarray([[1,2],[3,4]], int)
    b = nutils.types.frozenarray([[1,2],[3,4]], int)
    self.assertEqual(a, b)

  def test_ineq_equal(self):
    # Ordering comparisons are total-order style (whole-array), not elementwise.
    l = nutils.types.frozenarray([1,2], int)
    r = nutils.types.frozenarray([1,2], int)
    self.assertFalse(l < r)
    self.assertTrue(l <= r)
    self.assertFalse(l > r)
    self.assertTrue(l >= r)

  def test_ineq_smaller(self):
    l = nutils.types.frozenarray([1,2], int)
    r = nutils.types.frozenarray([2,1], int)
    self.assertTrue(l < r)
    self.assertTrue(l <= r)
    self.assertFalse(l > r)
    self.assertFalse(l >= r)

  def test_ineq_larger(self):
    l = nutils.types.frozenarray([2,1], int)
    r = nutils.types.frozenarray([1,2], int)
    self.assertFalse(l < r)
    self.assertFalse(l <= r)
    self.assertTrue(l > r)
    self.assertTrue(l >= r)

  def test_ineq_incomparable(self):
    # NOTE(review): `operator` is not imported in this module's import line;
    # presumably it arrives via `from nutils.testing import *` -- verify.
    array = nutils.types.frozenarray([1,2], int)
    for op in operator.lt, operator.le, operator.gt, operator.ge:
      with self.subTest(op=op), self.assertRaises(TypeError):
        op(array, 1)

  def test_full(self):
    self.assertEqual(nutils.types.frozenarray.full([2,3], 1.5), nutils.types.frozenarray([[1.5]*3]*2, float))

  def test_as_numpy_array(self):
    a = numpy.array(nutils.types.frozenarray([1,2]))
    self.assertIsInstance(a, numpy.ndarray)
class c_array(TestCase):
  '''Tests for :data:`nutils.types.c_array`: subscripting with a numpy dtype
  yields a converter exposing array data as a ctypes pointer, requiring a
  matching dtype and contiguous memory.'''

  def test_idempotence(self):
    P = nutils.types.c_array[numpy.int64]
    converted = P(numpy.array([1,2,3], dtype=numpy.int64))
    # Converting an already-converted value must return the same object.
    self.assertEqual(P(converted), converted)

  def test_list(self):
    converted = nutils.types.c_array[numpy.int64]([1,2,3])
    self.assertEqual(converted.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1)

  def test_array(self):
    converted = nutils.types.c_array[numpy.int64](numpy.array([1,2,3], dtype=numpy.int64))
    self.assertEqual(converted.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1)

  def test_array_invalid_dtype(self):
    with self.assertRaisesRegex(ValueError, '^Expected dtype .* but array has dtype .*\\.$'):
      nutils.types.c_array[numpy.int64](numpy.array([1,2,3], dtype=numpy.int32))

  def test_array_noncontinguous(self):
    # A transposed 2d array is not contiguous and must be rejected.
    with self.assertRaisesRegex(ValueError, '^Array is not contiguous\\.$'):
      nutils.types.c_array[numpy.int64](numpy.array([[1,2],[3,4]], dtype=numpy.int32).T)

  def test_wo_getitem(self):
    # The dtype subscript is mandatory; calling the bare factory fails.
    self.assertRaises(TypeError, nutils.types.c_array)
class T_Immutable(nutils.types.Immutable):
  # Module-level Immutable subclass (defined outside the tests so that
  # instances can be pickled in ImmutableFamily.test_pickle).
  def __init__(self, x, y, *, z):
    pass
class T_Singleton(nutils.types.Singleton):
  # Module-level Singleton subclass, the picklable counterpart of T_Immutable.
  def __init__(self, x, y, *, z):
    pass
@parametrize
class ImmutableFamily(TestCase):
  # Parametrized over self.cls in {nutils.types.Immutable, nutils.types.Singleton}
  # (see the two instantiations at the bottom of the file): both base classes
  # must provide the same value-equality, hashing and pickling behaviour.
  def test_pickle(self):
    # Pickling needs a module-level class, hence T_Immutable / T_Singleton.
    T = {nutils.types.Immutable: T_Immutable, nutils.types.Singleton: T_Singleton}[self.cls]
    a = T(1, 2, z=3)
    b = pickle.loads(pickle.dumps(a))
    self.assertEqual(a, b)
  def test_eq(self):
    # Equality depends on both the class identity and the argument values.
    class T(self.cls):
      def __init__(self, x, y):
        pass
    class U(self.cls):
      def __init__(self, x, y):
        pass
    self.assertEqual(T(1, 2), T(1, 2))
    self.assertNotEqual(T(1, 2), T(2, 1))
    self.assertNotEqual(T(1, 2), U(1, 2))
  def test_canonical_args(self):
    # Keyword and positional spellings of the same call compare equal,
    # including defaulted arguments.
    class T(self.cls):
      def __init__(self, x, y, z=3):
        pass
    self.assertEqual(T(x=1, y=2), T(1, 2, 3))
  def test_keyword_args(self):
    # **kwargs are part of the canonical argument set.
    class T(self.cls):
      def __init__(self, x, y, **kwargs):
        pass
    a = T(x=1, y=2, z=3)
    b = T(1, 2, z=3)
    self.assertEqual(a, b)
  def test_preprocessors(self):
    # Annotation-based coercion runs before equality is established,
    # so T(1) and T('1') are the same value.
    class T(self.cls):
      @nutils.types.apply_annotations
      def __init__(self, x: int):
        pass
    self.assertEqual(T(1), T('1'))
    self.assertEqual(T(1), T(x='1'))
  def test_nutils_hash(self):
    class T(self.cls):
      def __init__(self, x, y):
        pass
    class T1(self.cls, version=1):
      def __init__(self, x, y):
        pass
    class U(self.cls):
      def __init__(self, x, y):
        pass
    self.assertEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(T(1, 2)).hex())
    self.assertNotEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(T(2, 1)).hex())
    self.assertNotEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(U(1, 2)).hex())
    # Since the hash does not include base classes, the hashes of Immutable and Singleton are the same.
    # The `version=1` class keyword must change the hash (regression pins below).
    self.assertEqual(nutils.types.nutils_hash(T(1, 2)).hex(), '8c3ba8f0d9eb054ab192f4e4e2ba7442564bdf85')
    self.assertEqual(nutils.types.nutils_hash(T1(1, 2)).hex(), 'bab4ee65b5189f544a4242f0e386af76cfa6e31d')
  @parametrize.enable_if(lambda cls: cls is nutils.types.Singleton)
  def test_deduplication(self):
    # Only Singleton deduplicates: equal construction returns the same object.
    class T(self.cls):
      def __init__(self, x, y):
        pass
    class U(self.cls):
      def __init__(self, x, y):
        pass
    a = T(1, 2)
    b = T(1, 2)
    c = T(2, 1)
    d = U(1, 2)
    self.assertIs(a, b)
    self.assertEqual(a, b)
    self.assertIsNot(a, c)
    self.assertNotEqual(a, c)
    self.assertIsNot(a, d)
    self.assertNotEqual(a, d)
# Instantiate the parametrized ImmutableFamily suite once per base class.
ImmutableFamily(cls=nutils.types.Immutable)
ImmutableFamily(cls=nutils.types.Singleton)
# vim:sw=2:sts=2:et
| 34.714286 | 165 | 0.658551 | from nutils.testing import *
import nutils.types
import inspect, pickle, itertools, ctypes, stringly, tempfile, io, os
import numpy
class apply_annotations(TestCase):
def test_without_annotations(self):
@nutils.types.apply_annotations
def f(a, b):
return a, b
a, b = f(1, 2)
self.assertEqual(a, 1)
self.assertEqual(b, 2)
def test_pos_or_kw(self):
@nutils.types.apply_annotations
def f(a:int, b, c:str):
return a, b, c
a, b, c = f(1, 2, 3)
self.assertEqual(a, 1)
self.assertEqual(b, 2)
self.assertEqual(c, '3')
def test_with_signature(self):
def f(a):
return a
f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str)])
f = nutils.types.apply_annotations(f)
self.assertEqual(f(1), '1')
def test_posonly(self):
def f(a):
return a
f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_ONLY, annotation=str)])
f = nutils.types.apply_annotations(f)
self.assertEqual(f(1), '1')
def test_kwonly(self):
@nutils.types.apply_annotations
def f(a:str, *, b:int, c:bool):
return a, b, c
self.assertEqual(f(1, b='2', c=3), ('1', 2, True))
def test_varpos(self):
@nutils.types.apply_annotations
def f(a:str, *args):
return a, args
self.assertEqual(f(1, 2, 3), ('1', (2, 3)))
def test_varpos_annotated(self):
map_str = lambda args: map(str, args)
@nutils.types.apply_annotations
def f(a:str, *args:map_str):
return a, args
self.assertEqual(f(1, 2, 3), ('1', ('2', '3')))
def test_varkw(self):
@nutils.types.apply_annotations
def f(a:str, **kwargs):
return a, kwargs
self.assertEqual(f(1, b=2, c=3), ('1', dict(b=2, c=3)))
def test_varkw_annotated(self):
map_str = lambda kwargs: {k: str(v) for k, v in kwargs.items()}
@nutils.types.apply_annotations
def f(a:str, **kwargs:map_str):
return a, kwargs
self.assertEqual(f(1, b=2, c=3), ('1', dict(b='2', c='3')))
def test_posonly_varkw(self):
def f(a, b, **c):
return a, b, c
f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_ONLY, annotation=str),
inspect.Parameter('b', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str, default=None),
inspect.Parameter('c', inspect.Parameter.VAR_KEYWORD)])
f = nutils.types.apply_annotations(f)
self.assertEqual(f(1, c=2, d=3), ('1', None, dict(c=2, d=3)))
self.assertEqual(f(1, None, c=2, d=3), ('1', None, dict(c=2, d=3)))
self.assertEqual(f(1, b=None, c=2, d=3), ('1', None, dict(c=2, d=3)))
self.assertEqual(f(1, b=4, c=2, d=3), ('1', '4', dict(c=2, d=3)))
def test_default_none(self):
@nutils.types.apply_annotations
def f(a:str=None):
return a
self.assertEqual(f(), None)
self.assertEqual(f(None), None)
self.assertEqual(f(1), '1')
class nutils_hash(TestCase):
class custom:
@property
def __nutils_hash__(self):
return b'01234567890123456789'
def f(self):
pass
def test_ellipsis(self):
self.assertEqual(nutils.types.nutils_hash(...).hex(), '0c8bce06e451e4d5c49f60da0abf2ccbadf80600')
def test_None(self):
self.assertEqual(nutils.types.nutils_hash(None).hex(), 'bdfcbd663476b2db5b2b2e59a6d93882a908dc76')
def test_bool(self):
self.assertEqual(nutils.types.nutils_hash(False).hex(), '04a5e8f73dcea55dcd7482a476cf2e7b53d6dc50')
self.assertEqual(nutils.types.nutils_hash(True).hex(), '3fe990437e1624c831729f2866979254437bb7e9')
def test_int(self):
self.assertEqual(nutils.types.nutils_hash(1).hex(), '00ec7dea895ebd921e56bbc554688d8b3a1e4dfc')
self.assertEqual(nutils.types.nutils_hash(2).hex(), '8ae88fa39407cf75e46f9e0aba8c971de2256b14')
def test_float(self):
self.assertEqual(nutils.types.nutils_hash(1.).hex(), 'def4bae4f2a3e29f6ddac537d3fa7c72195e5d8b')
self.assertEqual(nutils.types.nutils_hash(2.5).hex(), '5216c2bf3c16d8b8ff4d9b79f482e5cea0a4cb95')
def test_complex(self):
self.assertEqual(nutils.types.nutils_hash(1+0j).hex(), 'cf7a0d933b7bb8d3ca252683b137534a1ecae073')
self.assertEqual(nutils.types.nutils_hash(2+1j).hex(), 'ee088890528f941a80aa842dad36591b05253e55')
def test_inequality_numbers(self):
self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(1.).hex())
self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(1+0j).hex())
self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(True).hex())
def test_str(self):
self.assertEqual(nutils.types.nutils_hash('spam').hex(), '3ca1023ab75a68dc7b0f83b43ec624704a7aef61')
self.assertEqual(nutils.types.nutils_hash('eggs').hex(), '124b0a7b3984e08125c380f7454896c1cad22e2c')
def test_bytes(self):
self.assertEqual(nutils.types.nutils_hash(b'spam').hex(), '5e717ec15aace7c25610c1dea340f2173f2df014')
self.assertEqual(nutils.types.nutils_hash(b'eggs').hex(), '98f2061978497751cac94f982fd96d9b015b74c3')
def test_tuple(self):
self.assertEqual(nutils.types.nutils_hash(()).hex(), '15d44755bf0731b2a3e9a5c5c8e0807b61881a1f')
self.assertEqual(nutils.types.nutils_hash((1,)).hex(), '328b16ebbc1815cf579ae038a35c4d68ebb022af')
self.assertNotEqual(nutils.types.nutils_hash((1,'spam')).hex(), nutils.types.nutils_hash(('spam',1)).hex())
def test_frozenset(self):
self.assertEqual(nutils.types.nutils_hash(frozenset([1,2])).hex(), '3862dc7e5321bc8a576c385ed2c12c71b96a375a')
self.assertEqual(nutils.types.nutils_hash(frozenset(['spam','eggs'])).hex(), '2c75fd3db57f5e505e1425ae9ff6dcbbc77fd123')
@unittest.skipIf(sys.version_info < (3,7), "not supported in this Python version")
def test_dataclass(self):
import dataclasses
A = dataclasses.make_dataclass('A', [('n', int), ('f', float)])
self.assertEqual(nutils.types.nutils_hash(A(n=1, f=2.5)).hex(), 'daf4235240e897beb9586db3c91663b24e229c52')
def test_type_bool(self):
self.assertEqual(nutils.types.nutils_hash(bool).hex(), 'feb912889d52d45fcd1e778c427b093a19a1ea78')
def test_type_int(self):
self.assertEqual(nutils.types.nutils_hash(int).hex(), 'aa8cb9975f7161b1f7ceb88b4b8585b49946b31e')
def test_type_float(self):
self.assertEqual(nutils.types.nutils_hash(float).hex(), '6d5079a53075f4b6f7710377838d8183730f1388')
def test_type_complex(self):
self.assertEqual(nutils.types.nutils_hash(complex).hex(), '6b00f6b9c6522742fd3f8054af6f10a24a671fff')
def test_type_str(self):
self.assertEqual(nutils.types.nutils_hash(str).hex(), '2349e11586163208d2581fe736630f4e4b680a7b')
def test_type_bytes(self):
self.assertEqual(nutils.types.nutils_hash(bytes).hex(), 'b0826ca666a48739e6f8b968d191adcefaa39670')
def test_type_tuple(self):
self.assertEqual(nutils.types.nutils_hash(tuple).hex(), '07cb4a24ca8ac53c820f20721432b4726e2ad1af')
def test_type_frozenset(self):
self.assertEqual(nutils.types.nutils_hash(frozenset).hex(), '48dc7cd0fbd54924498deb7c68dd363b4049f5e2')
def test_type_bufferedreader(self):
try:
fid, path = tempfile.mkstemp()
os.write(fid, b'test')
os.close(fid)
with open(path, 'rb') as f:
f.seek(2)
self.assertEqual(nutils.types.nutils_hash(f).hex(), '4edef1af3aa845b9e8bbde2d8265be5f30be4c2a')
self.assertEqual(f.tell(), 2)
with open(path, 'rb+') as f, self.assertRaises(TypeError):
nutils.types.nutils_hash(f).hex()
finally:
os.unlink(path)
def test_type_boundmethod(self):
self.assertEqual(nutils.types.nutils_hash(self.custom().f).hex(), 'ebf7084bb2504922235ab035a9197b9cb4cf47af')
def test_custom(self):
self.assertEqual(nutils.types.nutils_hash(self.custom()).hex(), b'01234567890123456789'.hex())
def test_unhashable(self):
with self.assertRaises(TypeError):
nutils.types.nutils_hash([])
class CacheMeta(TestCase):
def test_property(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
@property
def x(self):
nonlocal ncalls
ncalls += 1
return 1
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x, 1)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x, 1)
self.assertEqual(ncalls, 1)
def test_set_property(self):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
@property
def x(self):
return 1
t = T()
with self.assertRaises(AttributeError):
t.x = 1
def test_del_property(self):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
@property
def x(self):
return 1
t = T()
with self.assertRaises(AttributeError):
del t.x
def test_method_without_args(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
def x(self):
nonlocal ncalls
ncalls += 1
return 1
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x(), 1)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(), 1)
self.assertEqual(ncalls, 1)
def test_method_with_args(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
def x(self, a, b):
nonlocal ncalls
ncalls += 1
return a + b
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x(1, 2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(a=1, b=2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(2, 2), 4)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x(a=2, b=2), 4)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x(1, 2), 3)
self.assertEqual(ncalls, 3)
def test_method_with_args_and_preprocessors(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
@nutils.types.apply_annotations
def x(self, a:int, b:int):
nonlocal ncalls
ncalls += 1
return a + b
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x(1, 2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(a='1', b='2'), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x('2', '2'), 4)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x(a=2, b=2), 4)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x('1', 2), 3)
self.assertEqual(ncalls, 3)
def test_method_with_kwargs(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
def x(self, a, **kwargs):
nonlocal ncalls
ncalls += 1
return a + sum(kwargs.values())
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x(1, b=2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(a=1, b=2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(1, b=2, c=3), 6)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x(a=1, b=2, c=3), 6)
self.assertEqual(ncalls, 2)
def test_subclass_redefined_property(self):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
@property
def x(self):
return 1
class U(T):
__cache__ = 'x',
@property
def x(self):
return super().x + 1
@property
def y(self):
return super().x
u1 = U()
self.assertEqual(u1.x, 2)
self.assertEqual(u1.y, 1)
u2 = U()
self.assertEqual(u2.y, 1)
self.assertEqual(u2.x, 2)
def test_missing_attribute(self):
with self.assertRaisesRegex(TypeError, 'Attribute listed in __cache__ is undefined: x'):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
def test_invalid_attribute(self):
with self.assertRaisesRegex(TypeError, "Don't know how to cache attribute x: None"):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
x = None
def test_name_mangling(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = '__x',
@property
def __x(self):
nonlocal ncalls
ncalls += 1
return 1
@property
def y(self):
return self.__x
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.y, 1)
self.assertEqual(ncalls, 1)
self.assertEqual(t.y, 1)
self.assertEqual(ncalls, 1)
class strictint(TestCase):
def test_int(self):
value = nutils.types.strictint(1)
self.assertEqual(value, 1)
self.assertEqual(type(value), int)
def test_numpy_int(self):
value = nutils.types.strictint(numpy.int64(1))
self.assertEqual(value, 1)
self.assertEqual(type(value), int)
def test_float(self):
with self.assertRaises(ValueError):
nutils.types.strictint(1.)
def test_numpy_float(self):
with self.assertRaises(ValueError):
nutils.types.strictint(numpy.float64(1.))
def test_complex(self):
with self.assertRaises(ValueError):
nutils.types.strictint(1+0j)
def test_str(self):
with self.assertRaises(ValueError):
nutils.types.strictint('1')
class strictfloat(TestCase):
def test_int(self):
value = nutils.types.strictfloat(1)
self.assertEqual(value, 1.)
self.assertEqual(type(value), float)
def test_numpy_int(self):
value = nutils.types.strictfloat(numpy.int64(1))
self.assertEqual(value, 1.)
self.assertEqual(type(value), float)
def test_float(self):
value = nutils.types.strictfloat(1.)
self.assertEqual(value, 1.)
self.assertEqual(type(value), float)
def test_numpy_float(self):
value = nutils.types.strictfloat(numpy.float64(1.))
self.assertEqual(value, 1.)
self.assertEqual(type(value), float)
def test_complex(self):
with self.assertRaises(ValueError):
nutils.types.strictint(1+0j)
def test_str(self):
with self.assertRaises(ValueError):
nutils.types.strictfloat('1.')
class strictstr(TestCase):
def test_str(self):
value = nutils.types.strictstr('spam')
self.assertEqual(value, 'spam')
self.assertEqual(type(value), str)
def test_int(self):
with self.assertRaises(ValueError):
nutils.types.strictstr(1)
class strict(TestCase):
def test_valid(self):
self.assertEqual(nutils.types.strict[int](1), 1)
def test_invalid(self):
with self.assertRaises(ValueError):
nutils.types.strict[int]('1')
def test_call(self):
with self.assertRaises(TypeError):
nutils.types.strict()
class tupletype(TestCase):
def test_valid1(self):
value = nutils.types.tuple[nutils.types.strictint]([])
self.assertEqual(value, ())
self.assertEqual(type(value), tuple)
def test_valid2(self):
value = nutils.types.tuple[nutils.types.strictint]([1,2,3])
self.assertEqual(value, (1,2,3))
self.assertEqual(type(value), tuple)
def test_invalid(self):
with self.assertRaises(ValueError):
nutils.types.tuple[nutils.types.strictint]([1, 'spam','eggs'])
def test_without_item_constructor(self):
src = 1,2,3
self.assertEqual(nutils.types.tuple(src), tuple(src))
def test_name(self):
self.assertEqual(nutils.types.tuple[nutils.types.strictint].__name__, 'tuple[nutils.types.strictint]')
class frozendict(TestCase):
def test_constructor(self):
src = {'spam': 1, 'eggs': 2.3}
for name, value in [('mapping', src), ('mapping_view', src.items()), ('iterable', (item for item in src.items())), ('frozendict', nutils.types.frozendict(src))]:
with self.subTest(name):
frozen = nutils.types.frozendict(value)
self.assertIsInstance(frozen, nutils.types.frozendict)
self.assertEqual(dict(frozen), src)
def test_constructor_invalid(self):
with self.assertRaises(ValueError):
nutils.types.frozendict(['spam', 'eggs', 1])
def test_clsgetitem(self):
T = nutils.types.frozendict[str, float]
src = {1: 2, 'spam': '2.3'}
for name, value in [('mapping', src), ('mapping_view', src.items()), ('iterable', (item for item in src.items()))]:
with self.subTest(name):
frozen = T(value)
self.assertIsInstance(frozen, nutils.types.frozendict)
self.assertEqual(dict(frozen), {'1': 2., 'spam': 2.3})
def test_clsgetitem_invalid_types(self):
with self.assertRaises(RuntimeError):
nutils.types.frozendict[str, float, bool]
def test_clsgetitem_invalid_value(self):
T = nutils.types.frozendict[str, float]
with self.assertRaises(ValueError):
T(1)
def test_setitem(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
with self.assertRaises(TypeError):
frozen['eggs'] = 3
def test_delitem(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
with self.assertRaises(TypeError):
del frozen['eggs']
def test_getitem_existing(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
self.assertEqual(frozen['spam'], 1)
def test_getitem_nonexisting(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
with self.assertRaises(KeyError):
frozen['foo']
def test_contains(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
self.assertIn('spam', frozen)
self.assertNotIn('foo', frozen)
def test_iter(self):
src = {'spam': 1, 'eggs': 2.3}
frozen = nutils.types.frozendict(src)
self.assertEqual(frozenset(frozen), frozenset(src))
def test_len(self):
src = {'spam': 1, 'eggs': 2.3}
frozen = nutils.types.frozendict(src)
self.assertEqual(len(frozen), len(src))
def test_hash(self):
src = {'spam': 1, 'eggs': 2.3}
self.assertEqual(hash(nutils.types.frozendict(src)), hash(nutils.types.frozendict(src)))
def test_copy(self):
src = {'spam': 1, 'eggs': 2.3}
copy = nutils.types.frozendict(src).copy()
self.assertIsInstance(copy, dict)
self.assertEqual(copy, src)
def test_pickle(self):
src = {'spam': 1, 'eggs': 2.3}
frozen = pickle.loads(pickle.dumps(nutils.types.frozendict(src)))
self.assertIsInstance(frozen, nutils.types.frozendict)
self.assertEqual(dict(frozen), src)
def test_eq_same_id(self):
src = {'spam': 1, 'eggs': 2.3}
a = nutils.types.frozendict(src)
self.assertEqual(a, a)
def test_eq_other_id(self):
src = {'spam': 1, 'eggs': 2.3}
a = nutils.types.frozendict(src)
b = nutils.types.frozendict(src)
self.assertEqual(a, b)
def test_eq_deduplicated(self):
src = {'spam': 1, 'eggs': 2.3}
a = nutils.types.frozendict(src)
b = nutils.types.frozendict(src)
a == b # this replaces `a.__base` with `b.__base`
self.assertEqual(a, b)
def test_ineq_frozendict(self):
src = {'spam': 1, 'eggs': 2.3}
self.assertNotEqual(nutils.types.frozendict(src), nutils.types.frozendict({'spam': 1}))
def test_ineq_dict(self):
src = {'spam': 1, 'eggs': 2.3}
self.assertNotEqual(nutils.types.frozendict(src), src)
def test_nutils_hash(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
self.assertEqual(nutils.types.nutils_hash(frozen).hex(), '8cf14f109e54707af9c2e66d7d3cdb755cce8243')
class frozenmultiset(TestCase):
def test_constructor(self):
src = 'spam', 'bacon', 'sausage', 'spam'
for name, value in [('tuple', src), ('frozenmultiset', nutils.types.frozenmultiset(src))]:
with self.subTest(name=name):
frozen = nutils.types.frozenmultiset(value)
for item in 'spam', 'bacon', 'sausage':
self.assertEqual({k: tuple(frozen).count(k) for k in set(src)}, {'spam':2, 'bacon':1, 'sausage':1})
def test_clsgetitem(self):
src = False, 1, numpy.int64(2)
frozen = nutils.types.frozenmultiset[nutils.types.strictint](src)
self.assertEqual(set(frozen), {0, 1, 2})
def test_preserve_order(self):
for src in [('spam', 'bacon', 'sausage', 'spam'), ('spam', 'egg', 'spam', 'spam', 'bacon', 'spam')]:
with self.subTest(src=src):
self.assertEqual(tuple(nutils.types.frozenmultiset(src)), src)
def test_and(self):
for l, r, lar in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], ['spam', 'eggs']],
[['spam'], ['eggs'], []],
[['spam','spam']]*3]:
with self.subTest(l=l, r=r, lar=lar):
self.assertEqual(nutils.types.frozenmultiset(l)&nutils.types.frozenmultiset(r), nutils.types.frozenmultiset(lar))
with self.subTest(l=r, r=l, lar=lar):
self.assertEqual(nutils.types.frozenmultiset(r)&nutils.types.frozenmultiset(l), nutils.types.frozenmultiset(lar))
def test_sub(self):
for l, r, lmr, rml in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], [], ['spam']],
[['spam'], ['eggs'], ['spam'], ['eggs']],
[['spam'], ['spam'], [], []]]:
with self.subTest(l=l, r=r, lmr=lmr):
self.assertEqual(nutils.types.frozenmultiset(l)-nutils.types.frozenmultiset(r), nutils.types.frozenmultiset(lmr))
with self.subTest(l=r, r=l, lmr=rml):
self.assertEqual(nutils.types.frozenmultiset(r)-nutils.types.frozenmultiset(l), nutils.types.frozenmultiset(rml))
def test_pickle(self):
src = 'spam', 'bacon', 'sausage', 'spam'
frozen = pickle.loads(pickle.dumps(nutils.types.frozenmultiset(src)))
self.assertIsInstance(frozen, nutils.types.frozenmultiset)
self.assertEqual(frozen, nutils.types.frozenmultiset(src))
def test_hash(self):
src = 'spam', 'bacon', 'sausage', 'spam'
ref = nutils.types.frozenmultiset(src)
for perm in itertools.permutations(src):
with self.subTest(perm=perm):
self.assertEqual(hash(nutils.types.frozenmultiset(src)), hash(ref))
def test_nutils_hash(self):
for perm in itertools.permutations(('spam', 'bacon', 'sausage', 'spam')):
with self.subTest(perm=perm):
frozen = nutils.types.frozenmultiset(perm)
self.assertEqual(nutils.types.nutils_hash(frozen).hex(), 'f3fd9c6d4741af2e67973457ee6308deddcb714c')
def test_eq(self):
src = 'spam', 'bacon', 'sausage', 'spam'
ref = nutils.types.frozenmultiset(src)
for perm in itertools.permutations(src):
with self.subTest(perm=perm):
self.assertEqual(nutils.types.frozenmultiset(src), ref)
def test_contains(self):
src = 'spam', 'bacon', 'sausage', 'spam'
frozen = nutils.types.frozenmultiset(src)
for item in 'spam', 'bacon', 'eggs':
with self.subTest(item=item):
if item in src:
self.assertIn(item, frozen)
else:
self.assertNotIn(item, frozen)
def test_len(self):
src = 'spam', 'bacon', 'sausage', 'spam'
frozen = nutils.types.frozenmultiset(src)
self.assertEqual(len(frozen), len(src))
def test_nonzero(self):
self.assertTrue(nutils.types.frozenmultiset(['spam', 'eggs']))
self.assertFalse(nutils.types.frozenmultiset([]))
def test_add(self):
l = nutils.types.frozenmultiset(['spam', 'bacon'])
r = nutils.types.frozenmultiset(['sausage', 'spam'])
lpr = nutils.types.frozenmultiset(['spam', 'bacon', 'sausage', 'spam'])
self.assertEqual(l+r, lpr)
def test_isdisjoint(self):
for l, r, disjoint in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], False],
[['spam'], ['eggs'], True],
[['spam'], ['spam'], False]]:
with self.subTest(l=l, r=r, disjoint=disjoint):
self.assertEqual(nutils.types.frozenmultiset(l).isdisjoint(nutils.types.frozenmultiset(r)), disjoint)
class frozenarray(TestCase):
def _test_constructor(self, src, frozen_dtype, src_types=(list,numpy.array,nutils.types.frozenarray)):
src = list(src)
for copy in True, False:
for src_type in src_types:
with self.subTest(copy=copy, src_type=src_type):
frozen = nutils.types.frozenarray(src_type(src), copy=copy, dtype=frozen_dtype)
self.assertIsInstance(frozen, nutils.types.frozenarray)
self.assertEqual(frozen.tolist(), src)
def _test_constructor_raises(self, src, frozen_dtype, exc_type, exc_regex):
src = list(src)
for copy in True, False:
for src_type in list, numpy.array, nutils.types.frozenarray:
with self.subTest(copy=copy, src_type=src_type), self.assertRaisesRegex(exc_type, exc_regex):
nutils.types.frozenarray(src_type(src), copy=copy, dtype=frozen_dtype)
def test_constructor_bool(self):
self._test_constructor((False, True), bool)
def test_constructor_bool_emptyarray(self):
self._test_constructor((), bool, src_types=[list])
def test_constructor_int(self):
self._test_constructor((0,1), int)
def test_constructor_int_upcast(self):
self._test_constructor((False,True), int)
def test_constructor_int_downcast(self):
self._test_constructor((0.,1.), int)
def test_constructor_int_emptyarray(self):
self._test_constructor((), int, src_types=[list])
def test_constructor_float(self):
self._test_constructor((0.,1.), float)
def test_constructor_float_upcast(self):
self._test_constructor((0,1), float)
def test_constructor_float_downcast(self):
src = [0.+0j,1.+0j]
for copy in True, False:
with self.subTest(copy=copy, src_type=list), self.assertRaises(TypeError):
nutils.types.frozenarray(src, copy=copy, dtype=float)
for src_type in numpy.array, nutils.types.frozenarray:
with self.subTest(copy=copy, src_type=src_type), self.assertWarns(numpy.ComplexWarning):
nutils.types.frozenarray(src_type(src), copy=copy, dtype=float)
def test_constructor_complex(self):
self._test_constructor((0+0j,1+1j), complex)
def test_constructor_strictint(self):
self._test_constructor((0,1), nutils.types.strictint)
def test_constructor_strictint_upcast(self):
self._test_constructor((False,True), nutils.types.strictint)
def test_constructor_strictint_downcast(self):
self._test_constructor_raises((0.,1.), nutils.types.strictint, ValueError, '^downcasting .* is forbidden$')
def test_constructor_strictint_emptyarray(self):
self._test_constructor((), nutils.types.strictint, src_types=[list])
def test_constructor_strictfloat(self):
self._test_constructor((0.,1.), nutils.types.strictfloat)
def test_constructor_strictfloat_upcast(self):
self._test_constructor((0,1), nutils.types.strictfloat)
def test_constructor_strictfloat_downcast(self):
self._test_constructor_raises((0.+0j,1.+0j), nutils.types.strictfloat, ValueError, '^downcasting .* is forbidden$')
def test_constructor_invalid_dtype(self):
self._test_constructor_raises((0,1), list, ValueError, '^unsupported dtype:')
def test_clsgetitem(self):
src = [0.,1.]
frozen = nutils.types.frozenarray[nutils.types.strictfloat](src)
self.assertIsInstance(frozen, nutils.types.frozenarray)
self.assertEqual(frozen.tolist(), src)
def test_clsgetitem_invalid(self):
src = [0.,1.]
with self.assertRaises(ValueError):
nutils.types.frozenarray[nutils.types.strictint](src)
def test_nutils_hash(self):
a = nutils.types.frozenarray(numpy.array([[1,2],[3,4]], numpy.int64))
b = nutils.types.frozenarray(numpy.array([[1,3],[2,4]], numpy.int64))
self.assertNotEqual(nutils.types.nutils_hash(a).hex(), nutils.types.nutils_hash(b).hex())
self.assertEqual(nutils.types.nutils_hash(a).hex(), nutils.types.nutils_hash(b.T).hex())
self.assertEqual(nutils.types.nutils_hash(a).hex(), '42cc3a5e1216c1f0a9921a61a3a2c67025c98d69')
self.assertEqual(nutils.types.nutils_hash(b).hex(), '8f0c9f9a118c42c258f1e69e374aadda99b4be97')
def test_pickle(self):
src = [[1,2],[3,4]]
value = pickle.loads(pickle.dumps(nutils.types.frozenarray(src)))
self.assertIsInstance(value, nutils.types.frozenarray)
self.assertEqual(value, nutils.types.frozenarray(src))
def test_eq_same_instance(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
self.assertEqual(a, a)
def test_eq_not_frozenarray(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
self.assertNotEqual(a, [[1,2],[3,4]])
def test_eq_same_base(self):
base = numpy.array([[1,2],[3,4]], int)
a = nutils.types.frozenarray(base, copy=False)
b = nutils.types.frozenarray(base, copy=False)
self.assertEqual(a, b)
def test_eq_different_array(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
b = nutils.types.frozenarray([[1,3],[2,4]], int)
self.assertNotEqual(a, b)
def test_eq_different_dtype(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
b = nutils.types.frozenarray([[1,2],[3,4]], float)
self.assertNotEqual(a, b)
def test_eq_different_base(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
b = nutils.types.frozenarray([[1,2],[3,4]], int)
self.assertEqual(a, b)
def test_ineq_equal(self):
l = nutils.types.frozenarray([1,2], int)
r = nutils.types.frozenarray([1,2], int)
self.assertFalse(l < r)
self.assertTrue(l <= r)
self.assertFalse(l > r)
self.assertTrue(l >= r)
def test_ineq_smaller(self):
l = nutils.types.frozenarray([1,2], int)
r = nutils.types.frozenarray([2,1], int)
self.assertTrue(l < r)
self.assertTrue(l <= r)
self.assertFalse(l > r)
self.assertFalse(l >= r)
def test_ineq_larger(self):
l = nutils.types.frozenarray([2,1], int)
r = nutils.types.frozenarray([1,2], int)
self.assertFalse(l < r)
self.assertFalse(l <= r)
self.assertTrue(l > r)
self.assertTrue(l >= r)
def test_ineq_incomparable(self):
array = nutils.types.frozenarray([1,2], int)
for op in operator.lt, operator.le, operator.gt, operator.ge:
with self.subTest(op=op), self.assertRaises(TypeError):
op(array, 1)
def test_full(self):
self.assertEqual(nutils.types.frozenarray.full([2,3], 1.5), nutils.types.frozenarray([[1.5]*3]*2, float))
def test_as_numpy_array(self):
a = numpy.array(nutils.types.frozenarray([1,2]))
self.assertIsInstance(a, numpy.ndarray)
class c_array(TestCase):
def test_idempotence(self):
a = numpy.array([1,2,3], dtype=numpy.int64)
P = nutils.types.c_array[numpy.int64]
a_ct = P(a)
self.assertEqual(P(a_ct), a_ct)
def test_list(self):
a = [1,2,3]
a_ct = nutils.types.c_array[numpy.int64](a)
self.assertEqual(a_ct.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1)
def test_array(self):
a = numpy.array([1,2,3], dtype=numpy.int64)
a_ct = nutils.types.c_array[numpy.int64](a)
self.assertEqual(a_ct.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1)
def test_array_invalid_dtype(self):
a = numpy.array([1,2,3], dtype=numpy.int32)
with self.assertRaisesRegex(ValueError, '^Expected dtype .* but array has dtype .*\\.$'):
a_ct = nutils.types.c_array[numpy.int64](a)
def test_array_noncontinguous(self):
a = numpy.array([[1,2],[3,4]], dtype=numpy.int32).T
with self.assertRaisesRegex(ValueError, '^Array is not contiguous\\.$'):
a_ct = nutils.types.c_array[numpy.int64](a)
def test_wo_getitem(self):
with self.assertRaises(TypeError):
nutils.types.c_array()
class T_Immutable(nutils.types.Immutable):
def __init__(self, x, y, *, z):
pass
class T_Singleton(nutils.types.Singleton):
def __init__(self, x, y, *, z):
pass
@parametrize
class ImmutableFamily(TestCase):
  # Parametrized over `cls` (nutils.types.Immutable or nutils.types.Singleton)
  # via the two instantiations at the bottom of this file.

  def test_pickle(self):
    # Round-trip through pickle using the module-level T_* classes.
    T = {nutils.types.Immutable: T_Immutable, nutils.types.Singleton: T_Singleton}[self.cls]
    a = T(1, 2, z=3)
    b = pickle.loads(pickle.dumps(a))
    self.assertEqual(a, b)

  def test_eq(self):
    # Equality requires both the same class and the same argument values.
    class T(self.cls):
      def __init__(self, x, y):
        pass
    class U(self.cls):
      def __init__(self, x, y):
        pass
    self.assertEqual(T(1, 2), T(1, 2))
    self.assertNotEqual(T(1, 2), T(2, 1))
    self.assertNotEqual(T(1, 2), U(1, 2))

  def test_canonical_args(self):
    # Keyword, positional and defaulted spellings of the same call compare equal.
    class T(self.cls):
      def __init__(self, x, y, z=3):
        pass
    self.assertEqual(T(x=1, y=2), T(1, 2, 3))

  def test_keyword_args(self):
    # Arguments captured via **kwargs also participate in equality.
    class T(self.cls):
      def __init__(self, x, y, **kwargs):
        pass
    a = T(x=1, y=2, z=3)
    b = T(1, 2, z=3)
    self.assertEqual(a, b)

  def test_preprocessors(self):
    # With apply_annotations, coerced arguments compare equal: T('1') == T(1).
    class T(self.cls):
      @nutils.types.apply_annotations
      def __init__(self, x: int):
        pass
    self.assertEqual(T(1), T('1'))
    self.assertEqual(T(1), T(x='1'))

  def test_nutils_hash(self):
    # The hash depends on class, version and argument values.
    class T(self.cls):
      def __init__(self, x, y):
        pass
    class T1(self.cls, version=1):
      def __init__(self, x, y):
        pass
    class U(self.cls):
      def __init__(self, x, y):
        pass
    self.assertEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(T(1, 2)).hex())
    self.assertNotEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(T(2, 1)).hex())
    self.assertNotEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(U(1, 2)).hex())
    # Since the hash does not include base classes, the hashes of Immutable and Singleton are the same.
    self.assertEqual(nutils.types.nutils_hash(T(1, 2)).hex(), '8c3ba8f0d9eb054ab192f4e4e2ba7442564bdf85')
    self.assertEqual(nutils.types.nutils_hash(T1(1, 2)).hex(), 'bab4ee65b5189f544a4242f0e386af76cfa6e31d')

  @parametrize.enable_if(lambda cls: cls is nutils.types.Singleton)
  def test_deduplication(self):
    # Singleton (unlike Immutable) guarantees object identity for equal arguments.
    class T(self.cls):
      def __init__(self, x, y):
        pass
    class U(self.cls):
      def __init__(self, x, y):
        pass
    a = T(1, 2)
    b = T(1, 2)
    c = T(2, 1)
    d = U(1, 2)
    self.assertIs(a, b)
    self.assertEqual(a, b)
    self.assertIsNot(a, c)
    self.assertNotEqual(a, c)
    self.assertIsNot(a, d)
    self.assertNotEqual(a, d)

ImmutableFamily(cls=nutils.types.Immutable)
ImmutableFamily(cls=nutils.types.Singleton)
# vim:sw=2:sts=2:et
| true | true |
f7141225b2cad5e68e436f1ea8c5e6741e83043d | 1,244 | py | Python | DATA_SORT/3cities/SCAM/outputcesmscam_TREFHT_CLM5_CLM5F_001.py | islasimpson/snowpaper_2022 | d6ee677f696d7fd6e7cadef8168ce4fd8b184cac | [
"Apache-2.0"
] | null | null | null | DATA_SORT/3cities/SCAM/outputcesmscam_TREFHT_CLM5_CLM5F_001.py | islasimpson/snowpaper_2022 | d6ee677f696d7fd6e7cadef8168ce4fd8b184cac | [
"Apache-2.0"
] | null | null | null | DATA_SORT/3cities/SCAM/outputcesmscam_TREFHT_CLM5_CLM5F_001.py | islasimpson/snowpaper_2022 | d6ee677f696d7fd6e7cadef8168ce4fd8b184cac | [
"Apache-2.0"
] | null | null | null | import importlib
import xarray as xr
import numpy as np
import pandas as pd
import sys
from CASutils import filter_utils as filt
from CASutils import readdata_utils as read
from CASutils import calendar_utils as cal
importlib.reload(filt)
importlib.reload(read)
importlib.reload(cal)
# SCAM experiment directories (one per city) and the output tag for this
# CLM5/CLM5F ensemble member (001).
expname=['SASK_CLM5_CLM5F_01.001.FSCAM.sask_1979_2014',
         'TOR_CLM5_CLM5F_01.001.FSCAM.tor_1979_2014',
         'SID_SNOWD_SNOWDF_01.001.FSCAM.sidsnowd1']
outname='SCAM_CLM5_CLM5F_001'

# Cities and their coordinates.  NOTE(review): citylon/citylat are not used
# below (each SCAM run is a single column read with isel(lon=0, lat=0));
# presumably kept for reference.
cityname=['Saskatoon','Toronto','Siderovsk']
citylon=[253.330, 280.617, 82.3139]
citylat=[52.1579, 43.6532, 66.5973]

for icity in np.arange(0,3,1):
    basedir="/project/cas02/islas/CLM5_CLM4/raw/SCAM_new_lowrelax/"
    pathout="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/"
    fpath=basedir+expname[icity]+"/atm/hist/h0concat.nc"
    print(fpath)
    # Read the surface/near-surface fields over 1979-2014; only TREFHT is kept.
    dat = read.read_sfc_cesm(fpath,"1979-01-01T12:00:00","2014-12-31T12:00:00")
    if (icity == 0):
        # Allocate the (time, city) output array on the first iteration, once
        # the time axis of the data is known.
        trefht = xr.DataArray(np.zeros([dat.time.size, 3]), coords=[dat.time, cityname],
                              dims=['time','city'], name='trefht')
    trefht[:,icity] = dat.TREFHT.isel(lon=0,lat=0)

# Save the combined 3-city time series to netCDF.
trefht.to_netcdf(path=pathout+"TREFHT_"+outname+".nc")
| 29.619048 | 88 | 0.696945 | import importlib
import xarray as xr
import numpy as np
import pandas as pd
import sys
from CASutils import filter_utils as filt
from CASutils import readdata_utils as read
from CASutils import calendar_utils as cal
importlib.reload(filt)
importlib.reload(read)
importlib.reload(cal)
expname=['SASK_CLM5_CLM5F_01.001.FSCAM.sask_1979_2014',
'TOR_CLM5_CLM5F_01.001.FSCAM.tor_1979_2014',
'SID_SNOWD_SNOWDF_01.001.FSCAM.sidsnowd1']
outname='SCAM_CLM5_CLM5F_001'
cityname=['Saskatoon','Toronto','Siderovsk']
citylon=[253.330, 280.617, 82.3139]
citylat=[52.1579, 43.6532, 66.5973]
for icity in np.arange(0,3,1):
basedir="/project/cas02/islas/CLM5_CLM4/raw/SCAM_new_lowrelax/"
pathout="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/"
fpath=basedir+expname[icity]+"/atm/hist/h0concat.nc"
print(fpath)
dat = read.read_sfc_cesm(fpath,"1979-01-01T12:00:00","2014-12-31T12:00:00")
if (icity == 0):
trefht = xr.DataArray(np.zeros([dat.time.size, 3]), coords=[dat.time, cityname],
dims=['time','city'], name='trefht')
trefht[:,icity] = dat.TREFHT.isel(lon=0,lat=0)
trefht.to_netcdf(path=pathout+"TREFHT_"+outname+".nc")
| true | true |
f714129360782fdce2e19567a3deb3c965cf7a55 | 5,386 | py | Python | chapter2/intogen-arrays/src/biomart/ent_exp.py | chris-zen/phd-thesis | 1eefdff8e7ca1910304e27ae42551dc64496b101 | [
"Unlicense"
] | 1 | 2015-12-22T00:53:18.000Z | 2015-12-22T00:53:18.000Z | chapter2/intogen-arrays/src/biomart/ent_exp.py | chris-zen/phd-thesis | 1eefdff8e7ca1910304e27ae42551dc64496b101 | [
"Unlicense"
] | null | null | null | chapter2/intogen-arrays/src/biomart/ent_exp.py | chris-zen/phd-thesis | 1eefdff8e7ca1910304e27ae42551dc64496b101 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
"""
Import experiments into the database
* Configuration parameters:
- The ones required by intogen.data.entity.EntityManagerFactory
"""
from wok.task import Task
from wok.element import DataElementList
from intogen.data.entity import types
from intogen.data.entity.server import EntityServer
from intogen.biomart import biomart_db_connect, DEFAULT_INSERT_SIZE, DEFAULT_DB_ENGINE
from intogen.sql import BatchInsert
from pubmed import Pubmed
task = Task()
def _resolve_publication(pubmed, study, study_id, platform_id, log):
	"""Build the publication fields for one experiment.

	Returns (pub, pmid) where `pub` always maps the keys title/short_authors/
	date/journal (values may be None) and `pmid` is the pubmed id or None.
	Single quotes in string values are escaped for the SQL insert.
	"""
	pub = {}
	for k in ["title", "short_authors", "date", "journal"]:
		pub[k] = None

	pmid = None
	if "pubmed" in study:
		pmid = study["pubmed"]
		if isinstance(pmid, (DataElementList, list)):
			pmid = pmid[0]
			log.warn("Study {} with many pubmed_id's, only the first {} will be considered".format(study_id, pmid))

		log.debug("Retrieving information for pubmed_id '{}' ...".format(pmid))
		try:
			results = pubmed.find(pmid)
			if len(results) == 0:
				# FIX: previously `pub` was overwritten with the empty list and
				# the escaping loop below crashed on pub.items(); now we keep
				# the None-filled defaults and still insert the row.
				log.error("No publication information found for pubmed_id '{}' in experiment ({}, {})".format(pmid, study_id, platform_id))
			else:
				pub = results[0]
		except Exception as ex:
			log.error("Error retrieving pubmed information for experiment ({}, {}) with pubmed_id '{}'".format(study_id, platform_id, pmid))
			log.exception(ex)
	else:
		log.warn("Study {} has no 'pubmed_id' annotation".format(study_id))
		if "title" not in study:
			log.error("Study {} doesn't have annotation for 'pubmed_id' nor 'title'".format(study_id))
		elif "SO/contact_details[0]/contact_name" not in study \
				and "SO/contact_details/contact_name" not in study:
			log.error("Study {} doesn't have annotation for 'pubmed_id' nor 'SO.contact_details[0].contact_name'".format(study_id))
		else:
			# Fall back to the study's own annotations for title/authors/date.
			try:
				pub["title"] = study["title"]
				if "SO/contact_details[0]/contact_name" in study:
					pub["short_authors"] = study["SO/contact_details[0]/contact_name"]
				else:
					pub["short_authors"] = study["SO/contact_details/contact_name"]
				if "SO/submission/pub_date" in study:
					pub["date"] = study["SO/submission/pub_date"]
				else:
					pub["date"] = ""
			except Exception as ex:
				log.debug(study)
				log.exception(ex)  # FIX: was log.execption (typo -> AttributeError)

	# Escape single quotes for the SQL insert.
	# NOTE(review): `basestring` is the Python 2 spelling; if this module is
	# ever ported to Python 3 it must become `str` -- confirm target runtime.
	for k, v in pub.items():
		if v is not None and isinstance(v, basestring):
			pub[k] = v.replace("'", r"\'")

	return pub, pmid

def _study_source_info(study_source_map, study_id):
	"""Derive (source name, source home url, direct study link) from the
	study id prefix (e.g. "GEO-GSE1234" -> the configured "GEO" source)."""
	study_source = None
	study_source_url = None
	study_link = None

	parts = study_id.split("-")
	if len(parts) >= 2 and parts[0] in study_source_map:
		ss = study_source_map[parts[0]]
		study_source = ss.get("name")
		study_source_url = ss.get("home_url")
		try:
			study_link = ss.get("link", "").format(parts[1])
		except Exception:
			# Best effort: a malformed link template just leaves study_link unset.
			pass

	return study_source, study_source_url, study_link

@task.main()
def main():
	"""Export one row per (study, platform) experiment into the biomart
	ent_experiment table, enriching each row with publication and platform
	metadata."""
	task.check_conf(["entities", "repositories", "biomart.db"])
	conf = task.conf

	insert_size = conf.get("biomart.insert_size", DEFAULT_INSERT_SIZE, dtype=int)

	if "biomart.study_source" in conf:
		study_source_map = conf["biomart.study_source"]
	else:
		study_source_map = conf.create_element()

	log = task.logger()

	exp_port = task.ports("experiment")

	es = EntityServer(conf["entities"])
	em = es.manager()

	conn = biomart_db_connect(conf["biomart.db"], log)
	db_engine = conf.get("biomart.db.engine", DEFAULT_DB_ENGINE)

	cursor = conn.cursor()
	cursor.execute("""
CREATE TABLE ent_experiment (
  id int(11) NOT NULL,
  exp_name varchar(64) NOT NULL,
  study_id varchar(32) NOT NULL,
  study_source varchar(32) DEFAULT NULL,
  study_source_url varchar(512) DEFAULT NULL,
  study_link varchar(512) DEFAULT NULL,
  pub_pubmed varchar(32) DEFAULT NULL,
  pub_title varchar(300) DEFAULT NULL,
  pub_authors varchar(300) DEFAULT NULL,
  pub_year varchar(16) DEFAULT NULL,
  pub_journal varchar(200) DEFAULT NULL,
  platf_id varchar(32) NOT NULL,
  platf_title varchar(250) DEFAULT NULL,
  platf_technology varchar(96) DEFAULT NULL,
  PRIMARY KEY (id),
  KEY exp_name (exp_name),
  KEY pub_pubmed (pub_pubmed),
  KEY pub_title (pub_title),
  KEY pub_authors (pub_authors),
  KEY pub_year (pub_year),
  KEY pub_journal (pub_journal),
  KEY platf_title (platf_title),
  KEY platf_technology (platf_technology)
) ENGINE={} CHARACTER SET utf8 COLLATE utf8_general_ci""".format(db_engine))

	ib = BatchInsert(cursor, "ent_experiment",
			["id", "exp_name", "study_id", "study_source", "study_source_url", "study_link",
			"pub_title", "pub_authors", "pub_year", "pub_pubmed", "pub_journal",
			"platf_id", "platf_title", "platf_technology"], insert_size)

	pubmed = Pubmed()

	for i, exp in enumerate(exp_port, 1):
		study_id = exp[0]
		platform_id = exp[1]

		# Both the study and the platform entity must exist; skip the
		# experiment otherwise.
		study = em.find(study_id, types.SOURCE_STUDY)
		if study is None:
			log.error("{} not found: {}".format(types.SOURCE_STUDY, study_id))
			continue

		platf = em.find(platform_id, types.SOURCE_PLATFORM)
		if platf is None:
			log.error("{} not found: {}".format(types.SOURCE_PLATFORM, platform_id))
			continue

		log.info("Experiment for study {} and platform {} ...".format(study_id, platform_id))

		pub, pmid = _resolve_publication(pubmed, study, study_id, platform_id, log)

		exp_name = "{}; {}".format(study_id, platform_id)

		study_source, study_source_url, study_link = _study_source_info(study_source_map, study_id)

		ib.insert(i, exp_name, study_id, study_source, study_source_url, study_link,
				pub["title"], pub["short_authors"], pub["date"], pmid, pub["journal"],
				platform_id, platf["SO/platform_title"], "")

	log.debug("{} experiments inserted".format(ib.count))

	ib.close()
	cursor.close()
	conn.close()
	em.close()
	es.close()

task.start()
| 30.602273 | 132 | 0.690123 |
from wok.task import Task
from wok.element import DataElementList
from intogen.data.entity import types
from intogen.data.entity.server import EntityServer
from intogen.biomart import biomart_db_connect, DEFAULT_INSERT_SIZE, DEFAULT_DB_ENGINE
from intogen.sql import BatchInsert
from pubmed import Pubmed
task = Task()
@task.main()
def main():
task.check_conf(["entities", "repositories", "biomart.db"])
conf = task.conf
insert_size = conf.get("biomart.insert_size", DEFAULT_INSERT_SIZE, dtype=int)
if "biomart.study_source" in conf:
study_source_map = conf["biomart.study_source"]
else:
study_source_map = conf.create_element()
log = task.logger()
exp_port = task.ports("experiment")
es = EntityServer(conf["entities"])
em = es.manager()
conn = biomart_db_connect(conf["biomart.db"], log)
db_engine = conf.get("biomart.db.engine", DEFAULT_DB_ENGINE)
cursor = conn.cursor()
cursor.execute("""
CREATE TABLE ent_experiment (
id int(11) NOT NULL,
exp_name varchar(64) NOT NULL,
study_id varchar(32) NOT NULL,
study_source varchar(32) DEFAULT NULL,
study_source_url varchar(512) DEFAULT NULL,
study_link varchar(512) DEFAULT NULL,
pub_pubmed varchar(32) DEFAULT NULL,
pub_title varchar(300) DEFAULT NULL,
pub_authors varchar(300) DEFAULT NULL,
pub_year varchar(16) DEFAULT NULL,
pub_journal varchar(200) DEFAULT NULL,
platf_id varchar(32) NOT NULL,
platf_title varchar(250) DEFAULT NULL,
platf_technology varchar(96) DEFAULT NULL,
PRIMARY KEY (id),
KEY exp_name (exp_name),
KEY pub_pubmed (pub_pubmed),
KEY pub_title (pub_title),
KEY pub_authors (pub_authors),
KEY pub_year (pub_year),
KEY pub_journal (pub_journal),
KEY platf_title (platf_title),
KEY platf_technology (platf_technology)
) ENGINE={} CHARACTER SET utf8 COLLATE utf8_general_ci""".format(db_engine))
ib = BatchInsert(cursor, "ent_experiment",
["id", "exp_name", "study_id", "study_source", "study_source_url", "study_link",
"pub_title", "pub_authors", "pub_year", "pub_pubmed", "pub_journal",
"platf_id", "platf_title", "platf_technology"], insert_size)
pubmed = Pubmed()
for i, exp in enumerate(exp_port, 1):
study_id = exp[0]
platform_id = exp[1]
study = em.find(study_id, types.SOURCE_STUDY)
if study is None:
log.error("{} not found: {}".format(types.SOURCE_STUDY, study_id))
continue
platf = em.find(platform_id, types.SOURCE_PLATFORM)
if platf is None:
log.error("{} not found: {}".format(types.SOURCE_PLATFORM, platform_id))
continue
log.info("Experiment for study {} and platform {} ...".format(study_id, platform_id))
pub = {}
for k in ["title", "short_authors", "date", "journal"]:
pub[k] = None
if "pubmed" in study:
pmid = study["pubmed"]
if isinstance(pmid, (DataElementList, list)):
pmid = pmid[0]
log.warn("Study {} with many pubmed_id's, only the first {} will be considered".format(study_id, pmid))
log.debug("Retrieving information for pubmed_id '{}' ...".format(pmid))
try:
pub = pubmed.find(pmid)
if len(pub) == 0:
log.error("No publication information found for pubmed_id '{}' in experiment ({}, {})".format(pmid, study_id, platform_id))
else:
pub = pub[0]
except Exception as ex:
log.error("Error retrieving pubmed information for experiment ({}, {}) with pubmed_id '{}'".format(study_id, platform_id, pmid))
log.exception(ex)
else:
pmid = None
log.warn("Study {} has no 'pubmed_id' annotation".format(study_id))
if "title" not in study:
log.error("Study {} doesn't have annotation for 'pubmed_id' nor 'title'".format(study_id))
elif "SO/contact_details[0]/contact_name" not in study \
and "SO/contact_details/contact_name" not in study:
log.error("Study {} doesn't have annotation for 'pubmed_id' nor 'SO.contact_details[0].contact_name'".format(study_id))
else:
try:
pub["title"] = study["title"]
if "SO/contact_details[0]/contact_name" in study:
pub["short_authors"] = study["SO/contact_details[0]/contact_name"]
else:
pub["short_authors"] = study["SO/contact_details/contact_name"]
if "SO/submission/pub_date" in study:
pub["date"] = study["SO/submission/pub_date"]
else:
pub["date"] = ""
except Exception as ex:
log.debug(study)
log.execption(ex)
for k, v in pub.items():
if v is not None and isinstance(v, basestring):
pub[k] = v.replace("'", r"\'")
exp_name = "{}; {}".format(study_id, platform_id)
study_source = None
study_source_url = None
study_link = None
parts = study_id.split("-")
if len(parts) >= 2 and parts[0] in study_source_map:
ss = study_source_map[parts[0]]
study_source = ss.get("name")
study_source_url = ss.get("home_url")
try:
study_link = ss.get("link", "").format(parts[1])
except:
pass
ib.insert(i, exp_name, study_id, study_source, study_source_url, study_link,
pub["title"], pub["short_authors"], pub["date"], pmid, pub["journal"],
platform_id, platf["SO/platform_title"], "")
log.debug("{} experiments inserted".format(ib.count))
ib.close()
cursor.close()
conn.close()
em.close()
es.close()
task.start()
| true | true |
f714170b4a34daae5309a0afed9dc4fc7ef8fc70 | 1,331 | py | Python | app/core/tests/test_models.py | Aqurds/django-rest-api | 685215f05c1eba8a7ef64f3f49dbd33465544099 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | Aqurds/django-rest-api | 685215f05c1eba8a7ef64f3f49dbd33465544099 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | Aqurds/django-rest-api | 685215f05c1eba8a7ef64f3f49dbd33465544099 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
    """Exercise user creation through the project's custom user manager."""

    def test_create_user_with_email_successful(self):
        """A user created from an email/password pair stores both correctly."""
        credentials = {"email": "test@aqurds.com", "password": "aqurds123"}
        user = get_user_model().objects.create_user(
            credentials["email"], credentials["password"])
        self.assertEqual(user.email, credentials["email"])
        self.assertTrue(user.check_password(credentials["password"]))

    def test_email_normalize(self):
        """A mixed-case email is normalized: the stored address equals its
        lower-cased form."""
        raw_email = "test@AQURDS.COM"
        user = get_user_model().objects.create_user(raw_email, "aqurds123")
        self.assertEqual(user.email, raw_email.lower())

    def test_email_validation_for_user(self):
        """Creating a user without an email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, "aqurds123")

    def test_create_super_user(self):
        """A superuser gets both the superuser and staff flags set."""
        admin = get_user_model().objects.create_superuser(
            "super_user@aqurds.com", "super_user_123")
        self.assertTrue(admin.is_superuser)
        self.assertTrue(admin.is_staff)
| 35.026316 | 79 | 0.681443 | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
email = "test@aqurds.com"
password = "aqurds123"
user = get_user_model().objects.create_user(email, password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_email_normalize(self):
email = "test@AQURDS.COM"
password = "aqurds123"
user = get_user_model().objects.create_user(email, password)
self.assertEqual(user.email, email.lower())
def test_email_validation_for_user(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, "aqurds123")
def test_create_super_user(self):
email = "super_user@aqurds.com"
password = "super_user_123"
super_user = get_user_model().objects.create_superuser(email, password)
self.assertTrue(super_user.is_superuser)
self.assertTrue(super_user.is_staff)
| true | true |
f714177fea775d7ec4d451e8fd295b515bc257fe | 14,092 | py | Python | hddcoin/hodl/val.py | u4ma-hdd/hddcoin-blockchain | 4199d1f1d87e129ae9c08bf50dd48ec3b2c08727 | [
"Apache-2.0"
] | 37 | 2021-07-08T23:42:01.000Z | 2022-03-26T21:30:10.000Z | hddcoin/hodl/val.py | u4ma-hdd/hddcoin-blockchain | 4199d1f1d87e129ae9c08bf50dd48ec3b2c08727 | [
"Apache-2.0"
] | 13 | 2021-07-11T15:12:01.000Z | 2022-03-15T08:36:18.000Z | hddcoin/hodl/val.py | u4ma-hdd/hddcoin-blockchain | 4199d1f1d87e129ae9c08bf50dd48ec3b2c08727 | [
"Apache-2.0"
] | 19 | 2021-07-10T14:09:07.000Z | 2022-03-14T11:17:05.000Z | # -*- coding: utf-8 -*-
# NOTES:
# - this file is all about the trust model for the HODL contracts. TRUST NO ONE. VALIDATE ALL.
from __future__ import annotations
import dataclasses
import decimal
import re
import time
import typing as th
import hddcoin.hodl
from clvm_tools.binutils import disassemble, int_to_bytes #type:ignore
from hddcoin.hodl import exc as exc
from hddcoin.hodl.ContractDetails import ContractDetails
from hddcoin.hodl.util import vlog, puzhash2addr
from hddcoin.types.blockchain_format.program import Program, SerializedProgram
from hddcoin.types.blockchain_format.sized_bytes import bytes32
from hddcoin.util.byte_types import hexstr_to_bytes
# Average month length in seconds (365/12 days); used to turn a contract term
# in months into an expected payout timestamp.
SECONDS_PER_MONTH = int(86400 * 365 / 12)

# Regex matched against the *disassembled* CLVM of a contract's puzzle reveal.
# Everything outside the named groups pins the known HODL contract shape; the
# groups capture the client-specific baked-in terms consumed by
# _extractBakedInTerms:
#   v1 deposit_bytes, v2 payout puzhash, v3 payout timestamp, v4 reward_bytes,
#   v5 contract_id, v6 program_name, v7 client pubkey (v8 is captured but
#   currently unused).
conPat = (
    '\(a\ \(q\ 4\ \(c\ 44\ \(c\ 11\ \(\)\)\)\ \(c\ \(c\ 92\ \(c\ 23\ \(\)\)\)\ \(c\ \(c\ 52\ \('
    'q\ 1\)\)\ \(a\ \(i\ \(=\ 5\ 32\)\ \(q\ 4\ \(c\ 36\ \(c\ 34\ \(c\ 50\ \(\)\)\)\)\ \(a\ \(i'
    '\ \(>\ 11\ 38\)\ \(q\ 4\ \(c\ 90\ \(c\ 46\ \(c\ 38\ \(\)\)\)\)\ \(c\ \(c\ 90\ \(c\ 54\ \(c'
    '\ \(\-\ 11\ 38\)\ \(\)\)\)\)\ \(\)\)\)\ \(q\ 4\ \(c\ 90\ \(c\ 46\ \(c\ 11\ \(\)\)\)\)\ \(\)'
    '\)\)\ 1\)\)\ \(q\ 2\ \(i\ \(=\ 5\ 48\)\ \(q\ 2\ \(i\ \(any\ \(>\ \(/\ \(\*\ \(q\ \.\ 1000'
    '\)\ 94\)\ 38\)\ \(q\ \.\ 350\)\)\ \(>\ \(q\ \.\ 0x00e8d4a51000\)\ 38\)\ \(>\ 38\ \(q\ \.\ 0'
    'x0d8d726b7177a80000\)\)\)\ \(q\ 8\)\ \(q\ 4\ \(c\ 44\ \(c\ 38\ \(\)\)\)\ \(c\ \(c\ 90\ \(c'
    '\ 23\ \(c\ \(\+\ 38\ 94\)\ \(\)\)\)\)\ \(c\ \(c\ 122\ \(c\ 50\ \(\)\)\)\ \(\)\)\)\)\)\ 1\)'
    '\ \(q\ 2\ \(i\ \(=\ 5\ 56\)\ \(q\ 4\ \(c\ 44\ \(c\ \(\+\ 38\ 94\)\ \(\)\)\)\ \(c\ \(c\ 124'
    '\ \(c\ 126\ \(\)\)\)\ \(c\ \(c\ 90\ \(c\ 46\ \(c\ \(\+\ 38\ 94\)\ \(\)\)\)\)\ \(\)\)\)\)\ '
    '\(q\ 2\ \(i\ \(=\ 5\ 40\)\ \(q\ 8\ 42\ 50\ 38\ 94\ 126\ 46\)\ \(q\ 8\)\)\ 1\)\)\ 1\)\)\ 1'
    '\)\)\ 1\)\)\)\)\ \(c\ \(q\ \(\(\(q\ \.\ 2\)\ 4\ \.\ 3\)\ \(50\ \.\ 82\)\ 73\ 72\ \.\ 81\)\ '
    '\(\((?P<v7>.*)\ \.\ (?P<v5>.*)\)\ (?P<v6>.*)\ 51\ \.\ 62\)\ \((?P<v1>.*)\ \.\ (?P<v8>.*)\)'
    '\ (?P<v2>.*)\ (?P<v4>.*)\ \.\ (?P<v3>.*)\)\ 1\)\)'
)
@dataclasses.dataclass
class BakedInTerms:
    """Client-specific terms baked into a HODL contract's puzzle reveal.

    Populated by ``_extractBakedInTerms`` from the named groups of ``conPat``.
    """
    deposit_bytes: int    # deposit amount committed by the client (in bytes)
    payout_puzhash: str   # 0x-prefixed puzzle hash the payout is sent to
    payout_tstamp: int    # payout time as epoch seconds (validated against now + term)
    reward_bytes: int     # guaranteed reward amount (in bytes)
    contract_id: str      # 0x-prefixed contract identifier
    program_name: str     # HODL program this contract was registered under
    client_pubkey: str    # 0x-prefixed public key of the client
def _cmpRct(tok: str, expected: th.Any, received: th.Any) -> None:
if expected != received:
raise exc.ContractValidationError(f"Unexpected receipt value for {tok}: {received}")
def _cmpCon(tok: str, expected: th.Any, received: th.Any) -> None:
if expected != received:
raise exc.ContractValidationError(
f"Unexpected contract value for {tok}. Expected: {expected}; Received: {received}")
def _atomReprAsInt(s: str) -> int:
"""Translate CLVM atom repr to int."""
if s.startswith("0x"):
return int(s, base=16)
elif s.startswith('"'):
return int.from_bytes(s[1:-1].encode("ascii"), "big")
return int(s)
def _atomReprAsStr(s: str) -> str:
"""Translate CLVM atom repr to str."""
if s.startswith("0x"):
return bytes.fromhex(s[2:]).decode("ascii")
elif s.startswith('"'):
return s[1:-1]
return int_to_bytes(int(s)).decode("ascii")
def _atomReprAsHex(s: str) -> str:
"""Translate CLVM integer atom repr to a 0x-prefixed hex string."""
if s.startswith("0x"):
return s
elif s.startswith('"'):
return "0x" + s[1:-1].encode("ascii").hex()
return hex(int(s))
def _extractBakedInTerms(reveal: str) -> BakedInTerms:
    """Parse the client-specific terms baked into a HODL contract reveal.

    Disassembles the serialized CLVM program and matches it against the known
    contract shape (``conPat``); any mismatch or malformed input is reported
    as a ContractValidationError.
    """
    try:
        source = disassemble(Program.from_bytes(hexstr_to_bytes(reveal)))
        match = th.cast(re.Match, re.search(conPat, source))
        terms = BakedInTerms(
            deposit_bytes=_atomReprAsInt(match.group("v1")),
            payout_puzhash=_atomReprAsHex(match.group("v2")),
            payout_tstamp=_atomReprAsInt(match.group("v3")),
            reward_bytes=_atomReprAsInt(match.group("v4")),
            contract_id=_atomReprAsHex(match.group("v5")),
            program_name=_atomReprAsStr(match.group("v6")),
            client_pubkey=_atomReprAsHex(match.group("v7")),
        )
    except Exception:
        # Any failure (bad hex, shape mismatch -> match is None, bad atom)
        # means this is not a valid HODL contract reveal.
        raise exc.ContractValidationError("Contract reveal is not valid.")
    return terms
def _validatePuzzleHash(addr: str, reveal: str) -> bytes32:
    """Check that `reveal` hashes to the puzzle hash encoded in `addr`.

    Returns the tree hash of the revealed program on success.
    """
    revealed: SerializedProgram = SerializedProgram.fromhex(reveal)
    tree_hash = revealed.get_tree_hash()
    expected_ph = hddcoin.hodl.util.addr2puzhash(addr)
    if tree_hash.hex() != expected_ph:
        raise exc.ContractValidationError("Reveal does not match address")
    return tree_hash
def validateContract(# Given to server...
                     ex_program_name: str,
                     ex_deposit_bytes: int,
                     ex_payout_address: str,
                     ex_client_pubkey: str,
                     # Expected from server based on program details we had...
                     ex_term_in_months: decimal.Decimal,
                     ex_reward_percent: decimal.Decimal,
                     receipt: th.Dict[str, th.Any],
                     ) -> None: # raises exc.ContractValidationError on issues
    """Make sure that the receipt, and instructions therein, are what we expect.

    Args:
        ex_program_name: HODL program name the client asked to register for.
        ex_deposit_bytes: deposit amount (in bytes) the client asked to commit.
        ex_payout_address: address the client expects the payout to go to.
        ex_client_pubkey: the client's public key, as it was sent to the server.
        ex_term_in_months: contract term the client expects (from program details).
        ex_reward_percent: reward percentage the client expects (from program details).
        receipt: the full receipt dict returned by the HODL server.

    Raises exc.ContractValidationError if any issues are found.
    """
    # The overall trust model here is: TRUST NO ONE. THESE ARE MY PRECIOUS HDDs!!
    #
    # In the comments below, there are two parties:
    #
    #  1. The "client" --> This hddcoin application (i.e. this code) or the person running it
    #  2. The "server" --> The HODL server that has been contacted to provide contract terms,
    #                       which include a specific contract/puzzle to send an amount to.
    #
    # Although the HDDcoin team are certainly a trustable bunch and can be expected to provide the
    # correct/expected contract terms to the client to follow, if the client is concerned about
    # overall security and precious HDD funds (which the client obviously should be!!), the client
    # should ABSOLUTELY ASSUME THAT THE SERVER IS NOT TRUSTABLE, UNTIL VERIFIED. More specifically,
    # the client should assume that whoever/whatever provided the client the contract terms to
    # follow could definitely have been compromised by EVIL HACKERS AFTER THE CLIENT'S PRECIOUS HDD.
    #
    # Nasty scenarios we should be concerned about include (with overlapping concerns):
    #
    #  1. the HODL API server could have been hacked
    #  2. there could be a man-in-the-middle attack happening, making data untrustworthy
    #  3. the contract terms provided could have been falsified in some/any way
    #  4. the on-chain contract (smart coin via puzzlehash/reveal) could be bogus
    #  5. sneaky hacker farmers could mess with how pushed coins/puzzles are processed on-chain
    #  6. and more!
    #
    # With these concerns in mind, the client needs to be sure that everything is secure before
    # committing funds on-chain. The smart contract itself provides excellent on-chain security to
    # make sure that no adverse shenanigans can happen once funds are on chain. The purpose in this
    # `validateContract` function is to make sure that there are no other surprises in store (as
    # listed above).
    #
    # As stated in the docstring: This function makes sure that sure that the provided contract is
    # what the client expects.
    #
    # What the HODL contract is all about is providing a secure conditional lockbox where:
    #
    #   A) the client can stash a deposit into the box that ONLY THE CLIENT CAN EVER ACCESS
    #   B) a secure way is provided for the server (i.e. the HDDcoin team) to add the guaranteed
    #       reward to the lockbox for later payout (at end of contract)
    #        - IMPORTANT NOTE: the server can never access the deposit in any way whatsoever
    #        - the HDDcoin team gets reward funds from a HODL reserve in the pre-farm funds
    #   C) if the client meets the contract terms (i.e. the HODL deposit sits in the box for the
    #       length of the term), both the deposit and the reward pay out to the client's wallet
    #   D) if the client decides to cancel the contract, the deposit is returned to the client, and
    #       the guaranteed reward is returned to the HDDcoin HODL reserve
    #        - ONLY THE CLIENT CAN EVER CANCEL THE CONTRACT. NOBODY ELSE.
    #        - once the reward is added, it is GUARANTEED for the client (unless canceled). Sweet!
    #   E) there are other various bits involved... but they mostly revolve around ensuring that
    #       the mechanics of the contract are secure against nefarious hackers... I see you there
    #       reading this... SHOO!! Go away!! ¬_¬
    #
    # All of those listed things are *if all is as expected*. Again, this is what this validation
    # function is about. Even if the server is compromised (which it should not be, but... TRUST
    # NOBODY!), the client's HDD must NEVER be placed at risk here. This is fundamental to the HODL
    # program, and is supported through all supporting client code, server code, and on-chain code.

    vlog(1, "Extracting receipt fields for validation")
    try:
        rx_program_name = receipt["requested"]["program_name"]
        rx_deposit_bytes = receipt["requested"]["deposit_bytes"]
        rx_payout_address = receipt["requested"]["payout_address"]
        rx_client_pubkey = receipt["requested"]["client_pubkey"]
        rx_contract_id = receipt["receipt_info"]["contract_id"]
        rx_contract_address = receipt["coin_details"]["contract_address"]
        rx_reveal = receipt["coin_details"]["reveal"]
        rx_solCancelDep = receipt["coin_details"]["solution_cancel_deposited"]
        rx_solCancelGuar = receipt["coin_details"]["solution_cancel_guaranteed"]
        rx_solPayout = receipt["coin_details"]["solution_payout"]
    except KeyError as e:
        raise exc.ContractValidationError(f"Missing receipt key: {e.args[0]}")

    # Check the receipt fields (which don't matter that much, but still...)
    vlog(1, "Validating requested vs received")
    _cmpRct("program_name", ex_program_name, rx_program_name)
    _cmpRct("deposit_bytes", ex_deposit_bytes, rx_deposit_bytes)
    _cmpRct("payout_address", ex_payout_address, rx_payout_address)
    _cmpRct("client_pubkey", ex_client_pubkey, rx_client_pubkey)

    # Contract address and reveal must match...
    vlog(1, "Validating puzzle hash")
    ph_b32 = _validatePuzzleHash(rx_contract_address, rx_reveal)

    # Reveal must be the contract we expect...
    vlog(1, "Validating puzzle reveal")
    ex_payout_ph = f"0x{hddcoin.hodl.util.addr2puzhash(ex_payout_address)}"
    ex_reward_bytes = int(ex_deposit_bytes * (ex_reward_percent / 100))
    epoch_s = int(time.time())
    ex_payout_tstamp = int(epoch_s + (ex_term_in_months * SECONDS_PER_MONTH))
    try:
        terms = _extractBakedInTerms(rx_reveal)
        _cmpCon("deposit_bytes", ex_deposit_bytes, terms.deposit_bytes)
        _cmpCon("payout_address", ex_payout_ph, terms.payout_puzhash)
        _cmpCon("reward_bytes", ex_reward_bytes, terms.reward_bytes)
        _cmpCon("contract_id", f"0x{rx_contract_id}", terms.contract_id)
        _cmpCon("program_name", ex_program_name, terms.program_name)
        _cmpCon("client_pubkey", f"0x{ex_client_pubkey}", terms.client_pubkey)
    except Exception as e:
        raise exc.ContractValidationError(f"Error validating contract terms: {e!r}")
    if abs(ex_payout_tstamp - terms.payout_tstamp) > 3600:  # 1h good enough for validation
        msg = f"Unexpected contract value for payout_timestamp: {terms.payout_tstamp}"
        raise exc.ContractValidationError(msg)

    # Solutions must match...
    vlog(1, "Validating solutions")
    ex_solCancelDep = str(Program.to([1, ex_deposit_bytes, ph_b32]))
    ex_solCancelGuar = str(Program.to([1, ex_deposit_bytes + ex_reward_bytes, ph_b32]))
    ex_solPayout = str(Program.to([3, ex_deposit_bytes + ex_reward_bytes, ph_b32]))
    _cmpRct("solution_cancel_deposited", ex_solCancelDep, rx_solCancelDep)
    _cmpRct("solution_cancel_guaranteed", ex_solCancelGuar, rx_solCancelGuar)
    _cmpRct("solution_payout", ex_solPayout, rx_solPayout)

    # ALL IS WELL IF WE GOT HERE!
    vlog(1, "Contract provided by server is as expected!")
def validateCancellation(ex_contract_id: str,
                         contractDetails: ContractDetails,
                         ) -> None:
    """Makes sure that the contract details fetched from the HODL server by the cancel request are a
    match to what the user expects.

    Raises exc.CancelValidationError on a contract_id mismatch, and
    exc.ContractValidationError if the reveal does not check out.
    """
    # This is essentially just cross-checking the contract dict details with what is actually in the
    # reveal. We don't need to validate the cancellation solutions since we don't use/need them.
    # Those are only for users who want to do it on their own without HODL tooling.
    rx_contract_id = contractDetails.contract_id
    rx_contract_address = contractDetails.contract_address
    rx_reveal = contractDetails.puzzle_reveal

    if rx_contract_id != ex_contract_id:
        raise exc.CancelValidationError("contract_id mismatch")

    vlog(1, "Validating puzzle hash")
    _validatePuzzleHash(rx_contract_address, rx_reveal)

    vlog(1, "Validating puzzle reveal")
    # Not much to validate here. If it is the right contract form, it can only be a HODL contract.
    # Even still, to be ABSOLUTELY sure, we'll validate that the baked-in terms match the contract
    # details displayed to the user.
    terms = _extractBakedInTerms(rx_reveal)
    _cmpCon("deposit_bytes", contractDetails.deposit_bytes, terms.deposit_bytes)
    _cmpCon("payout_address", contractDetails.payout_address, puzhash2addr(terms.payout_puzhash))
    _cmpCon("reward_bytes", contractDetails.reward_bytes, terms.reward_bytes)
    _cmpCon("contract_id", f"0x{contractDetails.contract_id}", terms.contract_id)
    _cmpCon("program_name", f"{contractDetails.program_name}", terms.program_name)
    _cmpCon("client_pubkey", f"0x{contractDetails.client_pubkey}", terms.client_pubkey)
| 51.057971 | 100 | 0.648027 |
from __future__ import annotations
import dataclasses
import decimal
import re
import time
import typing as th
import hddcoin.hodl
from clvm_tools.binutils import disassemble, int_to_bytes
from hddcoin.hodl import exc as exc
from hddcoin.hodl.ContractDetails import ContractDetails
from hddcoin.hodl.util import vlog, puzhash2addr
from hddcoin.types.blockchain_format.program import Program, SerializedProgram
from hddcoin.types.blockchain_format.sized_bytes import bytes32
from hddcoin.util.byte_types import hexstr_to_bytes
SECONDS_PER_MONTH = int(86400 * 365 / 12)
conPat = (
'\(a\ \(q\ 4\ \(c\ 44\ \(c\ 11\ \(\)\)\)\ \(c\ \(c\ 92\ \(c\ 23\ \(\)\)\)\ \(c\ \(c\ 52\ \('
'q\ 1\)\)\ \(a\ \(i\ \(=\ 5\ 32\)\ \(q\ 4\ \(c\ 36\ \(c\ 34\ \(c\ 50\ \(\)\)\)\)\ \(a\ \(i'
'\ \(>\ 11\ 38\)\ \(q\ 4\ \(c\ 90\ \(c\ 46\ \(c\ 38\ \(\)\)\)\)\ \(c\ \(c\ 90\ \(c\ 54\ \(c'
'\ \(\-\ 11\ 38\)\ \(\)\)\)\)\ \(\)\)\)\ \(q\ 4\ \(c\ 90\ \(c\ 46\ \(c\ 11\ \(\)\)\)\)\ \(\)'
'\)\)\ 1\)\)\ \(q\ 2\ \(i\ \(=\ 5\ 48\)\ \(q\ 2\ \(i\ \(any\ \(>\ \(/\ \(\*\ \(q\ \.\ 1000'
'\)\ 94\)\ 38\)\ \(q\ \.\ 350\)\)\ \(>\ \(q\ \.\ 0x00e8d4a51000\)\ 38\)\ \(>\ 38\ \(q\ \.\ 0'
'x0d8d726b7177a80000\)\)\)\ \(q\ 8\)\ \(q\ 4\ \(c\ 44\ \(c\ 38\ \(\)\)\)\ \(c\ \(c\ 90\ \(c'
'\ 23\ \(c\ \(\+\ 38\ 94\)\ \(\)\)\)\)\ \(c\ \(c\ 122\ \(c\ 50\ \(\)\)\)\ \(\)\)\)\)\)\ 1\)'
'\ \(q\ 2\ \(i\ \(=\ 5\ 56\)\ \(q\ 4\ \(c\ 44\ \(c\ \(\+\ 38\ 94\)\ \(\)\)\)\ \(c\ \(c\ 124'
'\ \(c\ 126\ \(\)\)\)\ \(c\ \(c\ 90\ \(c\ 46\ \(c\ \(\+\ 38\ 94\)\ \(\)\)\)\)\ \(\)\)\)\)\ '
'\(q\ 2\ \(i\ \(=\ 5\ 40\)\ \(q\ 8\ 42\ 50\ 38\ 94\ 126\ 46\)\ \(q\ 8\)\)\ 1\)\)\ 1\)\)\ 1'
'\)\)\ 1\)\)\)\)\ \(c\ \(q\ \(\(\(q\ \.\ 2\)\ 4\ \.\ 3\)\ \(50\ \.\ 82\)\ 73\ 72\ \.\ 81\)\ '
'\(\((?P<v7>.*)\ \.\ (?P<v5>.*)\)\ (?P<v6>.*)\ 51\ \.\ 62\)\ \((?P<v1>.*)\ \.\ (?P<v8>.*)\)'
'\ (?P<v2>.*)\ (?P<v4>.*)\ \.\ (?P<v3>.*)\)\ 1\)\)'
)
@dataclasses.dataclass
class BakedInTerms:
    """Contract terms "baked in" to a HODL puzzle reveal.

    Populated by ``_extractBakedInTerms`` from the named groups (v1..v7) of
    ``conPat`` after disassembling the reveal.
    """
    deposit_bytes: int   # regex group v1 (integer amount)
    payout_puzhash: str  # regex group v2, normalized to a 0x-prefixed hex string
    payout_tstamp: int   # regex group v3; epoch seconds (compared against a computed timestamp in validateContract)
    reward_bytes: int    # regex group v4 (integer amount)
    contract_id: str     # regex group v5, normalized to a 0x-prefixed hex string
    program_name: str    # regex group v6, decoded as ASCII
    client_pubkey: str   # regex group v7, normalized to a 0x-prefixed hex string
def _cmpRct(tok: str, expected: th.Any, received: th.Any) -> None:
    """Compare one receipt field; raise ContractValidationError on mismatch."""
    if expected == received:
        return
    raise exc.ContractValidationError(f"Unexpected receipt value for {tok}: {received}")
def _cmpCon(tok: str, expected: th.Any, received: th.Any) -> None:
    """Compare one baked-in contract term; raise ContractValidationError on mismatch."""
    if expected == received:
        return
    raise exc.ContractValidationError(
        f"Unexpected contract value for {tok}. Expected: {expected}; Received: {received}")
def _atomReprAsInt(s: str) -> int:
    """Interpret a disassembled CLVM atom repr (hex, quoted ASCII, or decimal) as an int."""
    if s.startswith('"'):
        # Quoted ASCII string -> big-endian integer of its bytes.
        return int.from_bytes(s[1:-1].encode("ascii"), "big")
    base = 16 if s.startswith("0x") else 10
    return int(s, base=base)
def _atomReprAsStr(s: str) -> str:
    """Interpret a disassembled CLVM atom repr (hex, quoted, or decimal) as an ASCII string."""
    if s.startswith('"'):
        return s[1:-1]
    if s.startswith("0x"):
        return bytes.fromhex(s[2:]).decode("ascii")
    return int_to_bytes(int(s)).decode("ascii")
def _atomReprAsHex(s: str) -> str:
    """Interpret a disassembled CLVM atom repr as a 0x-prefixed hex string."""
    if s.startswith('"'):
        return "0x" + s[1:-1].encode("ascii").hex()
    return s if s.startswith("0x") else hex(int(s))
def _extractBakedInTerms(reveal: str) -> BakedInTerms:
    """Extract the baked-in contract terms from a hex-encoded puzzle reveal.

    The reveal is deserialized, disassembled to CLVM source text, and matched
    against ``conPat``; the pattern's named groups (v1..v7) are then decoded
    into a BakedInTerms instance.

    Raises exc.ContractValidationError if the reveal does not deserialize,
    does not match the expected contract shape, or any group fails to decode.
    """
    try:
        # Any failure in this whole pipeline (bad hex, bad program, no regex
        # match -> m is None, bad atom reprs) is collapsed into a single
        # ContractValidationError below.
        m = th.cast(re.Match, re.search(conPat,
                                        disassemble(Program.from_bytes(hexstr_to_bytes(reveal)))))
        yum = BakedInTerms(
            deposit_bytes = _atomReprAsInt(m.group("v1")),
            payout_puzhash = _atomReprAsHex(m.group("v2")),
            payout_tstamp = _atomReprAsInt(m.group("v3")),
            reward_bytes = _atomReprAsInt(m.group("v4")),
            contract_id = _atomReprAsHex(m.group("v5")),
            program_name = _atomReprAsStr(m.group("v6")),
            client_pubkey = _atomReprAsHex(m.group("v7")),
        )
    except Exception:
        raise exc.ContractValidationError("Contract reveal is not valid.")
    return yum
def _validatePuzzleHash(addr: str, reveal: str) -> bytes32:
    """Verify that *reveal* hashes to the puzzle hash encoded in *addr*.

    Returns the puzzle hash as ``bytes32`` so callers can reuse it.
    Raises exc.ContractValidationError when the reveal's tree hash does not
    match the puzzle hash decoded from the address.
    """
    sp: SerializedProgram = SerializedProgram.fromhex(reveal)
    ph = hddcoin.hodl.util.addr2puzhash(addr)
    ph_b32 = sp.get_tree_hash()
    if ph != ph_b32.hex():
        # FIX: the original message was an f-string with no placeholders;
        # include the offending address so the failure is actionable.
        raise exc.ContractValidationError(f"Reveal does not match address {addr}")
    return ph_b32
def validateContract(# Given to server...
                     ex_program_name: str,
                     ex_deposit_bytes: int,
                     ex_payout_address: str,
                     ex_client_pubkey: str,
                     # Expected from server based on program details we had...
                     ex_term_in_months: decimal.Decimal,
                     ex_reward_percent: decimal.Decimal,
                     receipt: th.Dict[str, th.Any],
                     ) -> None:  # raises exc.ContractValidationError on issues
    """Make sure that the contract the server provided is what the client expects.

    Cross-checks the receipt's echoed request fields, the on-chain contract
    address/reveal pair, the terms baked into the reveal (deposit, payout,
    reward, contract id, program name, client pubkey, payout timestamp), and
    the provided spend solutions. Raises exc.ContractValidationError on any
    mismatch; returns None when everything checks out.
    """
    # The overall trust model here is: TRUST NO ONE. THESE ARE MY PRECIOUS HDDs!!
    #
    # In the comments below, there are two parties:
    #
    #  1. The "client" --> This hddcoin application (i.e. this code) or the person running it
    #  2. The "server" --> The HODL server that has been contacted to provide contract terms,
    #       which include a specific contract/puzzle to send an amount to.
    #
    # Although the HDDcoin team are certainly a trustable bunch and can be expected to provide the
    # correct/expected contract terms to the client to follow, if the client is concerned about
    # overall security and precious HDD funds (which the client obviously should be!!), the client
    # should ABSOLUTELY ASSUME THAT THE SERVER IS NOT TRUSTABLE, UNTIL VERIFIED. More specifically,
    # the client should assume that whoever/whatever provided the client the contract terms to
    # follow could definitely have been compromised by EVIL HACKERS AFTER THE CLIENT'S PRECIOUS HDD.
    #
    # Nasty scenarios we should be concerned about include (with overlapping concerns):
    #
    #  1. the HODL API server could have been hacked
    #  2. there could be a man-in-the-middle attack happening, making data untrustworthy
    #  3. the contract terms provided could have been falsified in some/any way
    #  4. the on-chain contract (smart coin via puzzlehash/reveal) could be bogus
    #  5. sneaky hacker farmers could mess with how pushed coins/puzzles are processed on-chain
    #  6. and more!
    #
    # With these concerns in mind, the client needs to be sure that everything is secure before
    # committing funds on-chain. The smart contract itself provides excellent on-chain security to
    # make sure that no adverse shenanigans can happen once funds are on chain. The purpose in this
    # `validateContract` function is to make sure that there are no other surprises in store (as
    # listed above).
    #
    # As stated in the docstring: This function makes sure that sure that the provided contract is
    # what the client expects.
    #
    # What the HODL contract is all about is providing a secure conditional lockbox where:
    #
    #  A) the client can stash a deposit into the box that ONLY THE CLIENT CAN EVER ACCESS
    #  B) a secure way is provided for the server (i.e. the HDDcoin team) to add the guaranteed
    #     reward to the lockbox for later payout (at end of contract)
    #      - IMPORTANT NOTE: the server can never access the deposit in any way whatsoever
    #      - the HDDcoin team gets reward funds from a HODL reserve in the pre-farm funds
    #  C) if the client meets the contract terms (i.e. the HODL deposit sits in the box for the
    #     length of the term), both the deposit and the reward pay out to the client's wallet
    #  D) if the client decides to cancel the contract, the deposit is returned to the client, and
    #     the guaranteed reward is returned to the HDDcoin HODL reserve
    #      - ONLY THE CLIENT CAN EVER CANCEL THE CONTRACT. NOBODY ELSE.
    #      - once the reward is added, it is GUARANTEED for the client (unless canceled). Sweet!
    #  E) there are other various bits involved... but they mostly revolve around ensuring that
    #     the mechanics of the contract are secure against nefarious hackers... I see you there
    #     reading this... SHOO!! Go away!! ¬_¬
    #
    # All of those listed things are *if all is as expected*. Again, this is what this validation
    # function is about. Even if the server is compromised (which it should not be, but... TRUST
    # NOBODY!), the client's HDD must NEVER be placed at risk here. This is fundamental to the HODL
    # program, and is supported through all supporting client code, server code, and on-chain code.
    vlog(1, "Extracting receipt fields for validation")
    try:
        rx_program_name = receipt["requested"]["program_name"]
        rx_deposit_bytes = receipt["requested"]["deposit_bytes"]
        rx_payout_address = receipt["requested"]["payout_address"]
        rx_client_pubkey = receipt["requested"]["client_pubkey"]
        rx_contract_id = receipt["receipt_info"]["contract_id"]
        rx_contract_address = receipt["coin_details"]["contract_address"]
        rx_reveal = receipt["coin_details"]["reveal"]
        rx_solCancelDep = receipt["coin_details"]["solution_cancel_deposited"]
        rx_solCancelGuar = receipt["coin_details"]["solution_cancel_guaranteed"]
        rx_solPayout = receipt["coin_details"]["solution_payout"]
    except KeyError as e:
        raise exc.ContractValidationError(f"Missing receipt key: {e.args[0]}")
    # Check the receipt fields (which don't matter that much, but still...)
    vlog(1, "Validating requested vs received")
    _cmpRct("program_name", ex_program_name, rx_program_name)
    _cmpRct("deposit_bytes", ex_deposit_bytes, rx_deposit_bytes)
    _cmpRct("payout_address", ex_payout_address, rx_payout_address)
    _cmpRct("client_pubkey", ex_client_pubkey, rx_client_pubkey)
    # Contract address and reveal must match...
    vlog(1, "Validating puzzle hash")
    ph_b32 = _validatePuzzleHash(rx_contract_address, rx_reveal)
    # Reveal must be the contract we expect...
    vlog(1, "Validating puzzle reveal")
    ex_payout_ph = f"0x{hddcoin.hodl.util.addr2puzhash(ex_payout_address)}"
    ex_reward_bytes = int(ex_deposit_bytes * (ex_reward_percent / 100))
    epoch_s = int(time.time())
    ex_payout_tstamp = int(epoch_s + (ex_term_in_months * SECONDS_PER_MONTH))
    try:
        terms = _extractBakedInTerms(rx_reveal)
        _cmpCon("deposit_bytes", ex_deposit_bytes, terms.deposit_bytes)
        _cmpCon("payout_address", ex_payout_ph, terms.payout_puzhash)
        _cmpCon("reward_bytes", ex_reward_bytes, terms.reward_bytes)
        _cmpCon("contract_id", f"0x{rx_contract_id}", terms.contract_id)
        _cmpCon("program_name", ex_program_name, terms.program_name)
        _cmpCon("client_pubkey", f"0x{ex_client_pubkey}", terms.client_pubkey)
    except Exception as e:
        raise exc.ContractValidationError(f"Error validating contract terms: {e!r}")
    # The baked-in payout timestamp is computed server-side, so allow slack
    # for clock drift and transit time.
    if abs(ex_payout_tstamp - terms.payout_tstamp) > 3600:  # 1h good enough for validation
        msg = f"Unexpected contract value for payout_timestamp: {terms.payout_tstamp}"
        raise exc.ContractValidationError(msg)
    # Solutions must match...
    vlog(1, "Validating solutions")
    ex_solCancelDep = str(Program.to([1, ex_deposit_bytes, ph_b32]))
    ex_solCancelGuar = str(Program.to([1, ex_deposit_bytes + ex_reward_bytes, ph_b32]))
    ex_solPayout = str(Program.to([3, ex_deposit_bytes + ex_reward_bytes, ph_b32]))
    _cmpRct("solution_cancel_deposited", ex_solCancelDep, rx_solCancelDep)
    _cmpRct("solution_cancel_guaranteed", ex_solCancelGuar, rx_solCancelGuar)
    _cmpRct("solution_payout", ex_solPayout, rx_solPayout)
    # ALL IS WELL IF WE GOT HERE!
    vlog(1, "Contract provided by server is as expected!")
def validateCancellation(ex_contract_id: str,
                         contractDetails: ContractDetails,
                         ) -> None:
    """Validate a contract's details against its reveal before cancelling.

    Raises exc.CancelValidationError on a contract_id mismatch, and
    exc.ContractValidationError (via the _cmpCon / puzzle-hash helpers) when
    the reveal's baked-in terms disagree with *contractDetails*.
    """
    # This is essentially just cross-checking the contract dict details with what is actually in the
    # reveal. We don't need to validate the cancellation solutions since we don't use/need them.
    # Those are only for users who want to do it on their own without HODL tooling.
    rx_contract_id = contractDetails.contract_id
    rx_contract_address = contractDetails.contract_address
    rx_reveal = contractDetails.puzzle_reveal
    if rx_contract_id != ex_contract_id:
        raise exc.CancelValidationError("contract_id mismatch")
    vlog(1, "Validating puzzle hash")
    _validatePuzzleHash(rx_contract_address, rx_reveal)
    vlog(1, "Validating puzzle reveal")
    # Not much to validate here. If it is the right contract form, it can only be a HODL contract.
    # Even still, to be ABSOLUTELY sure, we'll validate that the baked-in terms match the contract
    # details displayed to the user.
    terms = _extractBakedInTerms(rx_reveal)
    _cmpCon("deposit_bytes", contractDetails.deposit_bytes, terms.deposit_bytes)
    _cmpCon("payout_address", contractDetails.payout_address, puzhash2addr(terms.payout_puzhash))
    _cmpCon("reward_bytes", contractDetails.reward_bytes, terms.reward_bytes)
    _cmpCon("contract_id", f"0x{contractDetails.contract_id}", terms.contract_id)
    _cmpCon("program_name", f"{contractDetails.program_name}", terms.program_name)
    _cmpCon("client_pubkey", f"0x{contractDetails.client_pubkey}", terms.client_pubkey)
| true | true |
f71417e1074609cbc2856e5c9c5709dab7cdcb5e | 114 | py | Python | django_filtersmerger/apps.py | MPASolutions/django-filtersmerger | 9a209af142a5be15b79cb7025fdf771dac3aec35 | [
"MIT"
] | null | null | null | django_filtersmerger/apps.py | MPASolutions/django-filtersmerger | 9a209af142a5be15b79cb7025fdf771dac3aec35 | [
"MIT"
] | null | null | null | django_filtersmerger/apps.py | MPASolutions/django-filtersmerger | 9a209af142a5be15b79cb7025fdf771dac3aec35 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class DjangoFiltersMergerConfig(AppConfig):
    """Django application configuration for the ``django_filtersmerger`` app."""
    name = 'django_filtersmerger'
| 19 | 43 | 0.807018 | from django.apps import AppConfig
class DjangoFiltersMergerConfig(AppConfig):
    """Django application configuration for the ``django_filtersmerger`` app."""
    name = 'django_filtersmerger'
| true | true |
f7141a61052156f4da421af60a3f48ccdefcac14 | 103 | py | Python | KulliSharif/KulliSharifapp/apps.py | iqran-star/API-PROJECT | 77d021098fd17b4ce086f8e6f914a7722fa9b558 | [
"MIT"
] | null | null | null | KulliSharif/KulliSharifapp/apps.py | iqran-star/API-PROJECT | 77d021098fd17b4ce086f8e6f914a7722fa9b558 | [
"MIT"
] | null | null | null | KulliSharif/KulliSharifapp/apps.py | iqran-star/API-PROJECT | 77d021098fd17b4ce086f8e6f914a7722fa9b558 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class KullisharifappConfig(AppConfig):
    """Django application configuration for the ``KulliSharifapp`` app."""
    name = 'KulliSharifapp'
| 17.166667 | 38 | 0.786408 | from django.apps import AppConfig
class KullisharifappConfig(AppConfig):
    """Django application configuration for the ``KulliSharifapp`` app."""
    name = 'KulliSharifapp'
| true | true |
f7141cc9e45ecbc5219b86989037c927da949950 | 16,052 | py | Python | owslib/fes2.py | vjf/OWSLib | 9a3768c3cae21cb9a30dc4437259dfaa8dde1118 | [
"BSD-3-Clause"
] | null | null | null | owslib/fes2.py | vjf/OWSLib | 9a3768c3cae21cb9a30dc4437259dfaa8dde1118 | [
"BSD-3-Clause"
] | null | null | null | owslib/fes2.py | vjf/OWSLib | 9a3768c3cae21cb9a30dc4437259dfaa8dde1118 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2021 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
"""
API for OGC Filter Encoding (FE) constructs and metadata.
Filter Encoding: http://www.opengeospatial.org/standards/filter
Supports version 2.0.2 (09-026r2).
"""
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
# default variables
def get_namespaces():
    """Build the namespace prefix map used by this module (FES is the default)."""
    helper = Namespaces()
    result = helper.get_namespaces(["dif", "fes", "gml", "ogc", "ows110", "xs", "xsi"])
    result[None] = helper.get_namespace("fes")
    return result
# Shared namespace map for every element built in this module.
namespaces = get_namespaces()
# Canonical FES 2.0 schema URL, combined into the xsi:schemaLocation value.
schema = 'http://schemas.opengis.net/filter/2.0/filterAll.xsd'
schema_location = '%s %s' % (namespaces['fes'], schema)
class FilterRequest(object):
    """Builder for an OGC ``fes:Filter`` request element."""

    def __init__(self, parent=None, version='2.0.0'):
        """
        filter Constructor

        Parameters
        ----------

        - parent: parent etree.Element object (default is None)
        - version: version (default is '2.0.0')

        """
        self.version = version
        self._root = etree.Element(util.nspath_eval('fes:Filter', namespaces))
        if parent is not None:
            self._root.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)

    def set(self, parent=False, qtype=None, keywords=None, typenames='csw:Record', propertyname='csw:AnyText',
            bbox=None, identifier=None):
        """
        Construct and process a GetRecords request

        Parameters
        ----------

        - parent: the parent Element object.  If this is not, then generate a standalone request
        - qtype: type of resource to query (i.e. service, dataset)
        - keywords: list of keywords (default None / empty: no keyword filter)
        - propertyname: the ValueReference to Filter against
        - bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
        - identifier: the dc:identifier to query against with a PropertyIsEqualTo. Ignores all other inputs.

        """
        # FIX: ``keywords`` previously defaulted to a shared mutable list
        # (``keywords=[]``); default to None and normalize here instead.
        if keywords is None:
            keywords = []

        # An identifier query short-circuits all other criteria.
        if identifier is not None:
            self._root.append(PropertyIsEqualTo('dc:identifier', identifier).toXML())
            return self._root

        filters = []

        # Keyword filter: multiple keywords are OR'd together.
        if len(keywords) > 1:
            likes = [PropertyIsLike(propertyname, "*%s*" % kw, wildCard="*") for kw in keywords]
            filters.append(Or(operations=likes))
        elif len(keywords) == 1:
            filters.append(PropertyIsLike(propertyname, "*%s*" % keywords[0], wildCard="*"))

        # Spatial filter.
        if bbox is not None:
            filters.append(BBox(bbox))

        # Resource-type filter.
        if qtype is not None:
            filters.append(PropertyIsEqualTo('dc:type', qtype))

        # And together filters if more than one exists (order: keyword, bbox, type).
        if len(filters) == 1:
            self._root.append(filters[0].toXML())
        elif len(filters) > 1:
            self._root.append(And(operations=filters).toXML())

        return self._root

    def setConstraint(self, constraint, tostring=False):
        """
        Construct and process a GetRecords request

        Parameters
        ----------

        - constraint: An OgcExpression object
        - tostring (optional): return as string

        """
        self._root.append(constraint.toXML())
        if tostring:
            return util.element_to_string(self._root, xml_declaration=False)
        return self._root

    def setConstraintList(self, constraints, tostring=False):
        """
        Construct and process a GetRecords request

        Parameters
        ----------

        - constraints: A list of OgcExpression objects
                       The list is interpretted like so:

                       [a,b,c]
                       a || b || c

                       [[a,b,c]]
                       a && b && c

                       [[a,b],[c],[d],[e]] or [[a,b],c,d,e]
                       (a && b) || c || d || e
        - tostring (optional): return as string

        """
        ors = []
        if len(constraints) == 1:
            # Single entry: either one expression, or one AND-group.
            if isinstance(constraints[0], OgcExpression):
                flt = self.setConstraint(constraints[0])
            else:
                self._root.append(And(operations=constraints[0]).toXML())
                flt = self._root
            if tostring:
                return util.element_to_string(flt, xml_declaration=False)
            return flt

        for c in constraints:
            if isinstance(c, OgcExpression):
                ors.append(c)
            elif isinstance(c, (list, tuple)):
                if len(c) == 1:
                    ors.append(c[0])
                elif len(c) >= 2:
                    # AND together the sub-expressions of this group.
                    ands = [sub for sub in c if isinstance(sub, OgcExpression)]
                    ors.append(And(operations=ands))

        self._root.append(Or(operations=ors).toXML())
        if tostring:
            return util.element_to_string(self._root, xml_declaration=False)
        return self._root
class FilterCapabilities(object):
    """Abstraction for Filter_Capabilities 2.0"""

    def __init__(self, elem):
        """Parse a ``fes:Filter_Capabilities`` element; ``None`` yields empty capabilities."""
        if elem is None:
            self.spatial_operands = []
            self.spatial_operators = []
            self.temporal_operators = []
            self.temporal_operands = []
            self.scalar_comparison_operators = []
            self.conformance = {}
            return

        def findall(path):
            # Namespace-expanded findall on the capabilities root.
            return elem.findall(util.nspath_eval(path, namespaces))

        # Spatial_Capabilities
        self.spatial_operands = [
            operand.attrib.get('name')
            for operand in findall('fes:Spatial_Capabilities/fes:GeometryOperands/fes:GeometryOperand')]
        self.spatial_operators = [
            operator.attrib['name']
            for operator in findall('fes:Spatial_Capabilities/fes:SpatialOperators/fes:SpatialOperator')]

        # Temporal_Capabilities
        self.temporal_operands = [
            operand.attrib.get('name')
            for operand in findall('fes:Temporal_Capabilities/fes:TemporalOperands/fes:TemporalOperand')]
        self.temporal_operators = [
            operator.attrib['name']
            for operator in findall('fes:Temporal_Capabilities/fes:TemporalOperators/fes:TemporalOperator')]

        # Scalar_Capabilities
        self.scalar_comparison_operators = [
            op.text
            for op in findall('fes:Scalar_Capabilities/fes:ComparisonOperators/fes:ComparisonOperator')]

        # Conformance
        self.conformance = {
            constraint.attrib.get('name'):
                constraint.find(util.nspath_eval('ows110:DefaultValue', namespaces)).text
            for constraint in findall('fes:Conformance/fes:Constraint')}
def setsortby(parent, propertyname, order='ASC'):
    """
    constructs a SortBy element

    Parameters
    ----------

    - parent: parent etree.Element object
    - propertyname: the ValueReference
    - order: the SortOrder (default is 'ASC')

    """
    sort_by = etree.SubElement(parent, util.nspath_eval('fes:SortBy', namespaces))
    sort_prop = etree.SubElement(sort_by, util.nspath_eval('fes:SortProperty', namespaces))
    etree.SubElement(sort_prop, util.nspath_eval('fes:ValueReference', namespaces)).text = propertyname
    etree.SubElement(sort_prop, util.nspath_eval('fes:SortOrder', namespaces)).text = order
class SortProperty(object):
    """One sort key (ValueReference + SortOrder) for a ``fes:SortBy``."""

    def __init__(self, propertyname, order='ASC'):
        self.propertyname = propertyname
        self.order = order.upper()
        if self.order not in ('ASC', 'DESC'):
            raise ValueError("SortOrder can only be 'ASC' or 'DESC'")

    def toXML(self):
        """Serialize as a ``fes:SortProperty`` element."""
        prop_el = etree.Element(util.nspath_eval("fes:SortProperty", namespaces))
        etree.SubElement(prop_el, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
        etree.SubElement(prop_el, util.nspath_eval('fes:SortOrder', namespaces)).text = self.order
        return prop_el
class SortBy(object):
    """Ordered collection of SortProperty objects."""

    def __init__(self, properties):
        self.properties = properties

    def toXML(self):
        """Serialize as a ``fes:SortBy`` element containing each property."""
        sort_by = etree.Element(util.nspath_eval("fes:SortBy", namespaces))
        sort_by.extend(prop.toXML() for prop in self.properties)
        return sort_by
class OgcExpression(object):
    """Marker base class for all OGC filter expression types in this module."""
    def __init__(self):
        pass
class BinaryComparisonOpType(OgcExpression):
    """Base class for the binary comparison operators (the PropertyIs* family)."""

    def __init__(self, propertyoperator, propertyname, literal, matchcase=True):
        self.propertyoperator = propertyoperator
        self.propertyname = propertyname
        self.literal = literal
        self.matchcase = matchcase

    def toXML(self):
        """Serialize as the operator element with ValueReference and Literal children."""
        root = etree.Element(util.nspath_eval(self.propertyoperator, namespaces))
        if not self.matchcase:
            root.set('matchCase', 'false')
        ref = etree.SubElement(root, util.nspath_eval('fes:ValueReference', namespaces))
        ref.text = self.propertyname
        lit = etree.SubElement(root, util.nspath_eval('fes:Literal', namespaces))
        lit.text = self.literal
        return root
class PropertyIsEqualTo(BinaryComparisonOpType):
    """Filter: property equals a literal value."""
    def __init__(self, propertyname, literal, matchcase=True):
        super(PropertyIsEqualTo, self).__init__(
            'fes:PropertyIsEqualTo', propertyname, literal, matchcase)
class PropertyIsNotEqualTo(BinaryComparisonOpType):
    """Filter: property differs from a literal value."""
    def __init__(self, propertyname, literal, matchcase=True):
        super(PropertyIsNotEqualTo, self).__init__(
            'fes:PropertyIsNotEqualTo', propertyname, literal, matchcase)
class PropertyIsLessThan(BinaryComparisonOpType):
    """Filter: property is less than a literal value."""
    def __init__(self, propertyname, literal, matchcase=True):
        super(PropertyIsLessThan, self).__init__(
            'fes:PropertyIsLessThan', propertyname, literal, matchcase)
class PropertyIsGreaterThan(BinaryComparisonOpType):
    """Filter: property is greater than a literal value."""
    def __init__(self, propertyname, literal, matchcase=True):
        super(PropertyIsGreaterThan, self).__init__(
            'fes:PropertyIsGreaterThan', propertyname, literal, matchcase)
class PropertyIsLessThanOrEqualTo(BinaryComparisonOpType):
    """Filter: property is less than or equal to a literal value."""
    def __init__(self, propertyname, literal, matchcase=True):
        super(PropertyIsLessThanOrEqualTo, self).__init__(
            'fes:PropertyIsLessThanOrEqualTo', propertyname, literal, matchcase)
class PropertyIsGreaterThanOrEqualTo(BinaryComparisonOpType):
    """Filter: property is greater than or equal to a literal value."""
    def __init__(self, propertyname, literal, matchcase=True):
        super(PropertyIsGreaterThanOrEqualTo, self).__init__(
            'fes:PropertyIsGreaterThanOrEqualTo', propertyname, literal, matchcase)
class PropertyIsLike(OgcExpression):
    """Filter: wildcard (LIKE) match of a property against a pattern."""

    def __init__(self, propertyname, literal, escapeChar='\\', singleChar='_', wildCard='%', matchCase=True):
        self.propertyname = propertyname
        self.literal = literal
        self.escapeChar = escapeChar
        self.singleChar = singleChar
        self.wildCard = wildCard
        self.matchCase = matchCase

    def toXML(self):
        """Serialize as a ``fes:PropertyIsLike`` element."""
        like = etree.Element(util.nspath_eval('fes:PropertyIsLike', namespaces))
        for attr, value in (('wildCard', self.wildCard),
                            ('singleChar', self.singleChar),
                            ('escapeChar', self.escapeChar)):
            like.set(attr, value)
        if not self.matchCase:
            like.set('matchCase', 'false')
        etree.SubElement(like, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
        etree.SubElement(like, util.nspath_eval('fes:Literal', namespaces)).text = self.literal
        return like
class PropertyIsNull(OgcExpression):
    """Filter: property has no value."""

    def __init__(self, propertyname):
        self.propertyname = propertyname

    def toXML(self):
        """Serialize as a ``fes:PropertyIsNull`` element."""
        null_el = etree.Element(util.nspath_eval('fes:PropertyIsNull', namespaces))
        ref = etree.SubElement(null_el, util.nspath_eval('fes:ValueReference', namespaces))
        ref.text = self.propertyname
        return null_el
class PropertyIsBetween(OgcExpression):
    """Filter: property value lies between a lower and an upper bound."""

    def __init__(self, propertyname, lower, upper):
        self.propertyname = propertyname
        self.lower = lower
        self.upper = upper

    def toXML(self):
        """Serialize as a ``fes:PropertyIsBetween`` element with both boundaries."""
        between = etree.Element(util.nspath_eval('fes:PropertyIsBetween', namespaces))
        etree.SubElement(between, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
        lower_el = etree.SubElement(between, util.nspath_eval('fes:LowerBoundary', namespaces))
        etree.SubElement(lower_el, util.nspath_eval('fes:Literal', namespaces)).text = '%s' % self.lower
        upper_el = etree.SubElement(between, util.nspath_eval('fes:UpperBoundary', namespaces))
        etree.SubElement(upper_el, util.nspath_eval('fes:Literal', namespaces)).text = '%s' % self.upper
        return between
class BBox(OgcExpression):
    """Construct a BBox, two pairs of coordinates (west-south and east-north)"""

    def __init__(self, bbox, crs=None):
        self.bbox = bbox
        self.crs = crs

    def toXML(self):
        """Serialize as a ``fes:BBOX`` element wrapping a GML envelope."""
        bbox_el = etree.Element(util.nspath_eval('fes:BBOX', namespaces))
        etree.SubElement(bbox_el, util.nspath_eval('fes:ValueReference', namespaces)).text = 'ows:BoundingBox'
        envelope = etree.SubElement(bbox_el, util.nspath_eval('gml:Envelope', namespaces))
        if self.crs is not None:
            envelope.set('srsName', self.crs)
        lower = '{} {}'.format(self.bbox[0], self.bbox[1])
        upper = '{} {}'.format(self.bbox[2], self.bbox[3])
        etree.SubElement(envelope, util.nspath_eval('gml:lowerCorner', namespaces)).text = lower
        etree.SubElement(envelope, util.nspath_eval('gml:upperCorner', namespaces)).text = upper
        return bbox_el
# BINARY
class BinaryLogicOpType(OgcExpression):
    """ Binary Operators: And / Or """
    def __init__(self, binary_operator, operations):
        """
        - binary_operator: qualified operator name ('fes:And' or 'fes:Or')
        - operations: at least two OgcExpression objects to combine
        """
        self.binary_operator = binary_operator
        # FIX: validation used to be ``assert len(operations) >= 2`` inside a
        # try/except. ``assert`` is stripped when Python runs with -O, which
        # silently disabled the check; test the length explicitly instead.
        if len(operations) < 2:
            raise ValueError("Binary operations (And / Or) require a minimum of two operations to operate against")
        self.operations = operations

    def toXML(self):
        """Serialize as the operator element with every operation appended."""
        node0 = etree.Element(util.nspath_eval(self.binary_operator, namespaces))
        for op in self.operations:
            node0.append(op.toXML())
        return node0
class And(BinaryLogicOpType):
    """Logical conjunction of two or more filter operations."""
    def __init__(self, operations):
        super(And, self).__init__('fes:And', operations)
class Or(BinaryLogicOpType):
    """Logical disjunction of two or more filter operations."""
    def __init__(self, operations):
        super(Or, self).__init__('fes:Or', operations)
# UNARY
class UnaryLogicOpType(OgcExpression):
    """ Unary Operator: Not """

    def __init__(self, unary_operator, operations):
        self.unary_operator = unary_operator
        self.operations = operations

    def toXML(self):
        """Serialize as the operator element with every operation appended."""
        root = etree.Element(util.nspath_eval(self.unary_operator, namespaces))
        root.extend(operation.toXML() for operation in self.operations)
        return root
class Not(UnaryLogicOpType):
    """Logical negation of the given filter operations."""
    def __init__(self, operations):
        super(Not, self).__init__('fes:Not', operations)
| 36.56492 | 119 | 0.637989 |
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
def get_namespaces():
n = Namespaces()
ns = n.get_namespaces(["dif", "fes", "gml", "ogc", "ows110", "xs", "xsi"])
ns[None] = n.get_namespace("fes")
return ns
namespaces = get_namespaces()
schema = 'http://schemas.opengis.net/filter/2.0/filterAll.xsd'
schema_location = '%s %s' % (namespaces['fes'], schema)
class FilterRequest(object):
def __init__(self, parent=None, version='2.0.0'):
self.version = version
self._root = etree.Element(util.nspath_eval('fes:Filter', namespaces))
if parent is not None:
self._root.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
def set(self, parent=False, qtype=None, keywords=[], typenames='csw:Record', propertyname='csw:AnyText', bbox=None,
identifier=None):
dc_identifier_equals_filter = None
if identifier is not None:
dc_identifier_equals_filter = PropertyIsEqualTo('dc:identifier', identifier)
self._root.append(dc_identifier_equals_filter.toXML())
return self._root
dc_type_equals_filter = None
if qtype is not None:
dc_type_equals_filter = PropertyIsEqualTo('dc:type', qtype)
bbox_filter = None
if bbox is not None:
bbox_filter = BBox(bbox)
keyword_filter = None
if len(keywords) > 0:
if len(keywords) > 1:
ks = []
for i in keywords:
ks.append(PropertyIsLike(propertyname, "*%s*" % i, wildCard="*"))
keyword_filter = Or(operations=ks)
elif len(keywords) == 1:
keyword_filter = PropertyIsLike(propertyname, "*%s*" % keywords[0], wildCard="*")
filters = [_f for _f in [keyword_filter, bbox_filter, dc_type_equals_filter] if _f]
if len(filters) == 1:
self._root.append(filters[0].toXML())
elif len(filters) > 1:
self._root.append(And(operations=filters).toXML())
return self._root
def setConstraint(self, constraint, tostring=False):
self._root.append(constraint.toXML())
if tostring:
return util.element_to_string(self._root, xml_declaration=False)
return self._root
def setConstraintList(self, constraints, tostring=False):
ors = []
if len(constraints) == 1:
if isinstance(constraints[0], OgcExpression):
flt = self.setConstraint(constraints[0])
else:
self._root.append(And(operations=constraints[0]).toXML())
flt = self._root
if tostring:
return util.element_to_string(flt, xml_declaration=False)
else:
return flt
for c in constraints:
if isinstance(c, OgcExpression):
ors.append(c)
elif isinstance(c, list) or isinstance(c, tuple):
if len(c) == 1:
ors.append(c[0])
elif len(c) >= 2:
ands = []
for sub in c:
if isinstance(sub, OgcExpression):
ands.append(sub)
ors.append(And(operations=ands))
self._root.append(Or(operations=ors).toXML())
if tostring:
return util.element_to_string(self._root, xml_declaration=False)
return self._root
class FilterCapabilities(object):
def __init__(self, elem):
if elem is None:
self.spatial_operands = []
self.spatial_operators = []
self.temporal_operators = []
self.temporal_operands = []
self.scalar_comparison_operators = []
self.conformance = {}
return
self.spatial_operands = [f.attrib.get('name') for f in elem.findall(util.nspath_eval(
'fes:Spatial_Capabilities/fes:GeometryOperands/fes:GeometryOperand', namespaces))]
self.spatial_operators = []
for f in elem.findall(util.nspath_eval(
'fes:Spatial_Capabilities/fes:SpatialOperators/fes:SpatialOperator', namespaces)):
self.spatial_operators.append(f.attrib['name'])
self.temporal_operands = [f.attrib.get('name') for f in elem.findall(util.nspath_eval(
'fes:Temporal_Capabilities/fes:TemporalOperands/fes:TemporalOperand', namespaces))]
self.temporal_operators = []
for f in elem.findall(util.nspath_eval(
'fes:Temporal_Capabilities/fes:TemporalOperators/fes:TemporalOperator', namespaces)):
self.temporal_operators.append(f.attrib['name'])
self.scalar_comparison_operators = [f.text for f in elem.findall(util.nspath_eval(
'fes:Scalar_Capabilities/fes:ComparisonOperators/fes:ComparisonOperator', namespaces))]
self.conformance = {}
for f in elem.findall(util.nspath_eval('fes:Conformance/fes:Constraint', namespaces)):
self.conformance[f.attrib.get('name')] = f.find(util.nspath_eval('ows110:DefaultValue', namespaces)).text
def setsortby(parent, propertyname, order='ASC'):
tmp = etree.SubElement(parent, util.nspath_eval('fes:SortBy', namespaces))
tmp2 = etree.SubElement(tmp, util.nspath_eval('fes:SortProperty', namespaces))
etree.SubElement(tmp2, util.nspath_eval('fes:ValueReference', namespaces)).text = propertyname
etree.SubElement(tmp2, util.nspath_eval('fes:SortOrder', namespaces)).text = order
class SortProperty(object):
def __init__(self, propertyname, order='ASC'):
self.propertyname = propertyname
self.order = order.upper()
if self.order not in ['DESC', 'ASC']:
raise ValueError("SortOrder can only be 'ASC' or 'DESC'")
def toXML(self):
node0 = etree.Element(util.nspath_eval("fes:SortProperty", namespaces))
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('fes:SortOrder', namespaces)).text = self.order
return node0
class SortBy(object):
def __init__(self, properties):
self.properties = properties
def toXML(self):
node0 = etree.Element(util.nspath_eval("fes:SortBy", namespaces))
for prop in self.properties:
node0.append(prop.toXML())
return node0
class OgcExpression(object):
def __init__(self):
pass
class BinaryComparisonOpType(OgcExpression):
def __init__(self, propertyoperator, propertyname, literal, matchcase=True):
self.propertyoperator = propertyoperator
self.propertyname = propertyname
self.literal = literal
self.matchcase = matchcase
def toXML(self):
node0 = etree.Element(util.nspath_eval(self.propertyoperator, namespaces))
if not self.matchcase:
node0.set('matchCase', 'false')
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('fes:Literal', namespaces)).text = self.literal
return node0
# Concrete binary comparison operators: each subclass only binds the matching
# fes:* element name and otherwise reuses BinaryComparisonOpType.
class PropertyIsEqualTo(BinaryComparisonOpType):
    """``fes:PropertyIsEqualTo`` comparison."""
    def __init__(self, propertyname, literal, matchcase=True):
        BinaryComparisonOpType.__init__(self, 'fes:PropertyIsEqualTo', propertyname, literal, matchcase)
class PropertyIsNotEqualTo(BinaryComparisonOpType):
    """``fes:PropertyIsNotEqualTo`` comparison."""
    def __init__(self, propertyname, literal, matchcase=True):
        BinaryComparisonOpType.__init__(self, 'fes:PropertyIsNotEqualTo', propertyname, literal, matchcase)
class PropertyIsLessThan(BinaryComparisonOpType):
    """``fes:PropertyIsLessThan`` comparison."""
    def __init__(self, propertyname, literal, matchcase=True):
        BinaryComparisonOpType.__init__(self, 'fes:PropertyIsLessThan', propertyname, literal, matchcase)
class PropertyIsGreaterThan(BinaryComparisonOpType):
    """``fes:PropertyIsGreaterThan`` comparison."""
    def __init__(self, propertyname, literal, matchcase=True):
        BinaryComparisonOpType.__init__(self, 'fes:PropertyIsGreaterThan', propertyname, literal, matchcase)
class PropertyIsLessThanOrEqualTo(BinaryComparisonOpType):
    """``fes:PropertyIsLessThanOrEqualTo`` comparison."""
    def __init__(self, propertyname, literal, matchcase=True):
        BinaryComparisonOpType.__init__(self, 'fes:PropertyIsLessThanOrEqualTo', propertyname, literal, matchcase)
class PropertyIsGreaterThanOrEqualTo(BinaryComparisonOpType):
    """``fes:PropertyIsGreaterThanOrEqualTo`` comparison."""
    def __init__(self, propertyname, literal, matchcase=True):
        BinaryComparisonOpType.__init__(self, 'fes:PropertyIsGreaterThanOrEqualTo', propertyname, literal, matchcase)
class PropertyIsLike(OgcExpression):
    """Wildcard text comparison (``fes:PropertyIsLike``)."""

    def __init__(self, propertyname, literal, escapeChar='\\', singleChar='_', wildCard='%', matchCase=True):
        self.propertyname = propertyname
        self.literal = literal
        self.escapeChar = escapeChar
        self.singleChar = singleChar
        self.wildCard = wildCard
        self.matchCase = matchCase

    def toXML(self):
        """Serialize to an ``fes:PropertyIsLike`` element."""
        root = etree.Element(util.nspath_eval('fes:PropertyIsLike', namespaces))
        # Wildcard semantics are carried as attributes on the element itself.
        root.set('wildCard', self.wildCard)
        root.set('singleChar', self.singleChar)
        root.set('escapeChar', self.escapeChar)
        if not self.matchCase:
            root.set('matchCase', 'false')
        for tag, text in (('fes:ValueReference', self.propertyname),
                          ('fes:Literal', self.literal)):
            etree.SubElement(root, util.nspath_eval(tag, namespaces)).text = text
        return root
class PropertyIsNull(OgcExpression):
    """Null-value test (``fes:PropertyIsNull``) for a single property."""

    def __init__(self, propertyname):
        self.propertyname = propertyname

    def toXML(self):
        """Serialize to an ``fes:PropertyIsNull`` element."""
        root = etree.Element(util.nspath_eval('fes:PropertyIsNull', namespaces))
        ref = etree.SubElement(root, util.nspath_eval('fes:ValueReference', namespaces))
        ref.text = self.propertyname
        return root
class PropertyIsBetween(OgcExpression):
    """Range test (``fes:PropertyIsBetween``) with lower/upper boundaries."""

    def __init__(self, propertyname, lower, upper):
        self.propertyname = propertyname
        self.lower = lower
        self.upper = upper

    def toXML(self):
        """Serialize to an ``fes:PropertyIsBetween`` element."""
        root = etree.Element(util.nspath_eval('fes:PropertyIsBetween', namespaces))
        etree.SubElement(root, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
        # Each boundary wraps its value in a fes:Literal child.
        for boundary, value in (('fes:LowerBoundary', self.lower),
                                ('fes:UpperBoundary', self.upper)):
            node = etree.SubElement(root, util.nspath_eval(boundary, namespaces))
            etree.SubElement(node, util.nspath_eval('fes:Literal', namespaces)).text = '%s' % value
        return root
class BBox(OgcExpression):
    """Spatial bounding-box filter (``fes:BBOX``).

    ``bbox`` is a (minx, miny, maxx, maxy) sequence; ``crs`` optionally
    names the coordinate reference system for the envelope.
    """

    def __init__(self, bbox, crs=None):
        self.bbox = bbox
        self.crs = crs

    def toXML(self):
        """Serialize to an ``fes:BBOX`` element wrapping a gml:Envelope."""
        root = etree.Element(util.nspath_eval('fes:BBOX', namespaces))
        etree.SubElement(root, util.nspath_eval('fes:ValueReference', namespaces)).text = 'ows:BoundingBox'
        envelope = etree.SubElement(root, util.nspath_eval('gml:Envelope', namespaces))
        if self.crs is not None:
            envelope.set('srsName', self.crs)
        corners = (('gml:lowerCorner', self.bbox[0], self.bbox[1]),
                   ('gml:upperCorner', self.bbox[2], self.bbox[3]))
        for tag, x, y in corners:
            etree.SubElement(envelope, util.nspath_eval(tag, namespaces)).text = '{} {}'.format(x, y)
        return root
class BinaryLogicOpType(OgcExpression):
    """Binary logical combination (And / Or) of two or more operations.

    :param binary_operator: qualified element name, e.g. ``'fes:And'``.
    :param operations: sequence of OgcExpression instances; the FES schema
        requires at least two.
    :raises ValueError: if fewer than two operations are supplied.
    """

    def __init__(self, binary_operator, operations):
        self.binary_operator = binary_operator
        # Validate explicitly instead of the original try/assert/except:
        # ``assert`` is stripped under ``python -O`` and the broad
        # ``except Exception`` also masked unrelated errors.
        if len(operations) < 2:
            raise ValueError(
                "Binary operations (And / Or) require a minimum of two operations to operate against")
        self.operations = operations

    def toXML(self):
        """Serialize to the operator element with one child per operation."""
        node0 = etree.Element(util.nspath_eval(self.binary_operator, namespaces))
        for op in self.operations:
            node0.append(op.toXML())
        return node0
class And(BinaryLogicOpType):
    """Logical conjunction (``fes:And``) of two or more operations."""
    def __init__(self, operations):
        super(And, self).__init__('fes:And', operations)
class Or(BinaryLogicOpType):
    """Logical disjunction (``fes:Or``) of two or more operations."""
    def __init__(self, operations):
        super(Or, self).__init__('fes:Or', operations)
class UnaryLogicOpType(OgcExpression):
    """Unary logical operator applied to a list of operations.

    NOTE(review): FES ``Not`` formally takes a single operand, but this
    class accepts a list and serializes every entry -- confirm callers
    pass exactly one operation.
    """
    def __init__(self, unary_operator, operations):
        self.unary_operator = unary_operator
        self.operations = operations
    def toXML(self):
        """Serialize to the operator element with one child per operation."""
        node0 = etree.Element(util.nspath_eval(self.unary_operator, namespaces))
        for op in self.operations:
            node0.append(op.toXML())
        return node0
class Not(UnaryLogicOpType):
    """Logical negation (``fes:Not``)."""
    def __init__(self, operations):
        super(Not, self).__init__('fes:Not', operations)
| true | true |
f7141cf967ed30a0143ab32103399d5b149ab436 | 361 | py | Python | users/migrations/0002_alter_user_options.py | JeffreyDrJ/myfirstblog | 5b1f0fbd6c85027bad7d0e24d67251f41da53fac | [
"MIT"
] | null | null | null | users/migrations/0002_alter_user_options.py | JeffreyDrJ/myfirstblog | 5b1f0fbd6c85027bad7d0e24d67251f41da53fac | [
"MIT"
] | 1 | 2021-05-29T17:09:43.000Z | 2021-05-29T17:09:43.000Z | users/migrations/0002_alter_user_options.py | JeffreyDrJ/myfirstblog | 5b1f0fbd6c85027bad7d0e24d67251f41da53fac | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-05-05 06:00
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: set the admin display names of the User model
    to '用户管理' (singular and plural)."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='user',
            # verbose_name / verbose_name_plural drive the Django admin labels.
            options={'verbose_name': '用户管理', 'verbose_name_plural': '用户管理'},
        ),
    ]
| 20.055556 | 76 | 0.592798 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='user',
options={'verbose_name': '用户管理', 'verbose_name_plural': '用户管理'},
),
]
| true | true |
f7141de0498279619548f98c6c2bc7c8730633e4 | 4,876 | py | Python | test/mitmproxy/io/test_tnetstring.py | 0x7c48/mitmproxy | f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba | [
"MIT"
] | 74 | 2016-03-20T17:39:26.000Z | 2020-05-12T13:53:23.000Z | test/mitmproxy/io/test_tnetstring.py | 0x7c48/mitmproxy | f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba | [
"MIT"
] | 7 | 2020-06-16T06:35:02.000Z | 2022-03-15T20:15:53.000Z | test/mitmproxy/io/test_tnetstring.py | 0x7c48/mitmproxy | f9d8f3bae3f4e681d5f4d406b7e06b099e60ecba | [
"MIT"
] | 5 | 2016-12-14T14:56:57.000Z | 2020-03-08T20:58:31.000Z | import unittest
import random
import math
import io
import struct
from mitmproxy.io import tnetstring
MAXINT = 2 ** (struct.Struct('i').size * 8 - 1) - 1
FORMAT_EXAMPLES = {
b'0:}': {},
b'0:]': [],
b'51:5:hello,39:11:12345678901#4:this,4:true!0:~4:\x00\x00\x00\x00,]}':
{b'hello': [12345678901, b'this', True, None, b'\x00\x00\x00\x00']},
b'5:12345#': 12345,
b'12:this is cool,': b'this is cool',
b'19:this is unicode \xe2\x98\x85;': u'this is unicode \u2605',
b'0:,': b'',
b'0:;': u'',
b'0:~': None,
b'4:true!': True,
b'5:false!': False,
b'10:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00,': b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'24:5:12345#5:67890#5:xxxxx,]': [12345, 67890, b'xxxxx'],
b'18:3:0.1^3:0.2^3:0.3^]': [0.1, 0.2, 0.3],
b'243:238:233:228:223:218:213:208:203:198:193:188:183:178:173:168:163:158:153:148:143:138:133:128:123:118:113:108:103:99:95:91:87:83:79:75:71:67:63:59:55:51:47:43:39:35:31:27:23:19:15:11:hello-there,]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]': [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[b'hello-there']]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] # noqa
}
def get_random_object(random=random, depth=0):
    """Generate a random object serializable by tnetstring.

    The probability of producing a container (list/dict) decreases as
    ``depth`` grows, so the recursion always bottoms out in a scalar.

    :param random: random source (the ``random`` module or a
        ``random.Random`` instance), injectable for reproducibility.
    :param depth: current recursion depth.
    """
    if random.randint(depth, 10) <= 4:
        # Container case: list or dict of recursively generated values.
        if random.randint(0, 1) == 0:
            return [get_random_object(random, depth + 1)
                    for _ in range(random.randint(0, 10))]
        result = {}
        for _ in range(random.randint(0, 10)):
            # NOTE(review): the key is the str() of a list of ints, e.g.
            # "[65, 66]" -- presumably a random-string stand-in; kept as-is.
            key_len = random.randint(0, 100)
            key = str([random.randint(32, 126) for _ in range(key_len)])
            result[key] = get_random_object(random, depth + 1)
        return result
    # Scalar case: None, bool, signed int, or printable-ASCII bytes.
    kind = random.randint(0, 4)
    if kind == 0:
        return None
    if kind == 1:
        return True
    if kind == 2:
        return False
    if kind == 3:
        # Sign is drawn first to preserve the original RNG call order.
        sign = 1 if random.randint(0, 1) == 0 else -1
        return sign * random.randint(0, MAXINT)
    size = random.randint(0, 100)
    return bytes(random.randint(32, 126) for _ in range(size))
class Test_Format(unittest.TestCase):
    """Round-trip tests for the in-memory tnetstring API (loads/dumps/pop)."""

    def test_roundtrip_format_examples(self):
        # Every canonical encoding must decode to its expected value, and the
        # expected value must also survive a dumps/loads round trip.
        for data, expect in FORMAT_EXAMPLES.items():
            self.assertEqual(expect, tnetstring.loads(data))
            self.assertEqual(
                expect, tnetstring.loads(tnetstring.dumps(expect)))
            self.assertEqual((expect, b''), tnetstring.pop(data))

    def test_roundtrip_format_random(self):
        for _ in range(500):
            v = get_random_object()
            self.assertEqual(v, tnetstring.loads(tnetstring.dumps(v)))
            self.assertEqual((v, b""), tnetstring.pop(tnetstring.dumps(v)))

    def test_roundtrip_format_unicode(self):
        # Previously an exact duplicate of test_roundtrip_format_random
        # (get_random_object never yields text).  Now round-trips actual
        # unicode strings, matching the ';'-terminated text encodings in
        # FORMAT_EXAMPLES (e.g. u'this is unicode \u2605').
        samples = [u'', u'ascii only', u'this is unicode \u2605',
                   u'caf\xe9', u'\u65e5\u672c\u8a9e']
        for v in samples:
            self.assertEqual(v, tnetstring.loads(tnetstring.dumps(v)))
            self.assertEqual((v, b''), tnetstring.pop(tnetstring.dumps(v)))

    def test_roundtrip_big_integer(self):
        # 30000! has >100k digits; ensures arbitrary-precision ints survive.
        i1 = math.factorial(30000)
        s = tnetstring.dumps(i1)
        i2 = tnetstring.loads(s)
        self.assertEqual(i1, i2)
class Test_FileLoading(unittest.TestCase):
    """Round-trip tests for the file-based tnetstring API (load/dump)."""

    def _check_roundtrip(self, value, encoded=None):
        """Load ``value`` from a stream and assert the trailer is untouched.

        The stream is primed either from the canonical bytes ``encoded``
        or from our own ``tnetstring.dump`` of ``value``.
        """
        stream = io.BytesIO()
        if encoded is not None:
            stream.write(encoded)
        else:
            tnetstring.dump(value, stream)
        stream.write(b'OK')
        stream.seek(0)
        self.assertEqual(value, tnetstring.load(stream))
        self.assertEqual(b'OK', stream.read())

    def test_roundtrip_file_examples(self):
        # Each example is checked twice: once from the canonical encoding,
        # once re-encoded by dump().
        for data, expect in FORMAT_EXAMPLES.items():
            self._check_roundtrip(expect, encoded=data)
            self._check_roundtrip(expect)

    def test_roundtrip_file_random(self):
        for _ in range(500):
            self._check_roundtrip(get_random_object())

    def test_error_on_absurd_lengths(self):
        stream = io.BytesIO(b'1000000000:pwned!,')
        with self.assertRaises(ValueError):
            tnetstring.load(stream)
        # The parser must stop at the oversized length prefix: the next
        # unread byte is the ':' separator.
        self.assertEqual(stream.read(1), b':')
def suite():
    """Assemble the Format and FileLoading cases into one test suite."""
    loader = unittest.TestLoader()
    # Local renamed from 'suite' to avoid shadowing this function's name.
    tests = unittest.TestSuite()
    for case in (Test_Format, Test_FileLoading):
        tests.addTest(loader.loadTestsFromTestCase(case))
    return tests
| 35.333333 | 381 | 0.553322 | import unittest
import random
import math
import io
import struct
from mitmproxy.io import tnetstring
MAXINT = 2 ** (struct.Struct('i').size * 8 - 1) - 1
FORMAT_EXAMPLES = {
b'0:}': {},
b'0:]': [],
b'51:5:hello,39:11:12345678901#4:this,4:true!0:~4:\x00\x00\x00\x00,]}':
{b'hello': [12345678901, b'this', True, None, b'\x00\x00\x00\x00']},
b'5:12345#': 12345,
b'12:this is cool,': b'this is cool',
b'19:this is unicode \xe2\x98\x85;': u'this is unicode \u2605',
b'0:,': b'',
b'0:;': u'',
b'0:~': None,
b'4:true!': True,
b'5:false!': False,
b'10:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00,': b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'24:5:12345#5:67890#5:xxxxx,]': [12345, 67890, b'xxxxx'],
b'18:3:0.1^3:0.2^3:0.3^]': [0.1, 0.2, 0.3],
b'243:238:233:228:223:218:213:208:203:198:193:188:183:178:173:168:163:158:153:148:143:138:133:128:123:118:113:108:103:99:95:91:87:83:79:75:71:67:63:59:55:51:47:43:39:35:31:27:23:19:15:11:hello-there,]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]': [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[b'hello-there']]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
}
def get_random_object(random=random, depth=0):
if random.randint(depth, 10) <= 4:
what = random.randint(0, 1)
if what == 0:
n = random.randint(0, 10)
l = []
for _ in range(n):
l.append(get_random_object(random, depth + 1))
return l
if what == 1:
n = random.randint(0, 10)
d = {}
for _ in range(n):
n = random.randint(0, 100)
k = str([random.randint(32, 126) for _ in range(n)])
d[k] = get_random_object(random, depth + 1)
return d
else:
what = random.randint(0, 4)
if what == 0:
return None
if what == 1:
return True
if what == 2:
return False
if what == 3:
if random.randint(0, 1) == 0:
return random.randint(0, MAXINT)
else:
return -1 * random.randint(0, MAXINT)
n = random.randint(0, 100)
return bytes([random.randint(32, 126) for _ in range(n)])
class Test_Format(unittest.TestCase):
def test_roundtrip_format_examples(self):
for data, expect in FORMAT_EXAMPLES.items():
self.assertEqual(expect, tnetstring.loads(data))
self.assertEqual(
expect, tnetstring.loads(tnetstring.dumps(expect)))
self.assertEqual((expect, b''), tnetstring.pop(data))
def test_roundtrip_format_random(self):
for _ in range(500):
v = get_random_object()
self.assertEqual(v, tnetstring.loads(tnetstring.dumps(v)))
self.assertEqual((v, b""), tnetstring.pop(tnetstring.dumps(v)))
def test_roundtrip_format_unicode(self):
for _ in range(500):
v = get_random_object()
self.assertEqual(v, tnetstring.loads(tnetstring.dumps(v)))
self.assertEqual((v, b''), tnetstring.pop(tnetstring.dumps(v)))
def test_roundtrip_big_integer(self):
i1 = math.factorial(30000)
s = tnetstring.dumps(i1)
i2 = tnetstring.loads(s)
self.assertEqual(i1, i2)
class Test_FileLoading(unittest.TestCase):
def test_roundtrip_file_examples(self):
for data, expect in FORMAT_EXAMPLES.items():
s = io.BytesIO()
s.write(data)
s.write(b'OK')
s.seek(0)
self.assertEqual(expect, tnetstring.load(s))
self.assertEqual(b'OK', s.read())
s = io.BytesIO()
tnetstring.dump(expect, s)
s.write(b'OK')
s.seek(0)
self.assertEqual(expect, tnetstring.load(s))
self.assertEqual(b'OK', s.read())
def test_roundtrip_file_random(self):
for _ in range(500):
v = get_random_object()
s = io.BytesIO()
tnetstring.dump(v, s)
s.write(b'OK')
s.seek(0)
self.assertEqual(v, tnetstring.load(s))
self.assertEqual(b'OK', s.read())
def test_error_on_absurd_lengths(self):
s = io.BytesIO()
s.write(b'1000000000:pwned!,')
s.seek(0)
with self.assertRaises(ValueError):
tnetstring.load(s)
self.assertEqual(s.read(1), b':')
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(Test_Format))
suite.addTest(loader.loadTestsFromTestCase(Test_FileLoading))
return suite
| true | true |
f7141ebf7eb8ca97831794cbe3f70aa8721341b7 | 2,217 | py | Python | examples/gbml_omniglot.py | Renovamen/metallic | c3992e4b322f9d41d9b7997c472baf99c843046c | [
"MIT"
] | 5 | 2021-04-14T07:31:06.000Z | 2021-12-11T08:12:10.000Z | examples/gbml_omniglot.py | Renovamen/metallic | c3992e4b322f9d41d9b7997c472baf99c843046c | [
"MIT"
] | 1 | 2021-04-14T07:44:36.000Z | 2021-04-15T14:01:52.000Z | examples/gbml_omniglot.py | Renovamen/metallic | c3992e4b322f9d41d9b7997c472baf99c843046c | [
"MIT"
] | null | null | null | import os
import sys
base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(base_path)
from torch import optim
from metallic.data.benchmarks import get_benchmarks
from metallic.data.dataloader import MetaDataLoader
from metallic.models import OmniglotCNN
from metallic.metalearners import FOMAML, MAML, Reptile, MinibatchProx, ANIL
from metallic.trainer import Trainer
from metallic.utils import Logger
# ---- hyperparameters ----
# Meta-learning algorithm to run; must be a key of ALGO_LIST below.
ALGO = 'maml'
# Tasks per meta-batch.
BATCH_SIZE = 16
# N-way K-shot task configuration.
N_WAY = 5
K_SHOT = 1
# Outer-loop (meta) and inner-loop (adaptation) learning rates.
OUTER_LR = 0.001
INNER_LR = 0.4
# Gradient steps taken per task in the inner loop.
INNER_STEPS = 1
N_EPOCHES = 100
N_ITERS_PER_EPOCH = 500
N_ITERS_TEST = 600
# NOTE(review): N_WORKERS is never referenced below -- presumably meant for
# the data loaders; confirm before relying on it.
N_WORKERS = 5
# -------------------------

# Maps the ALGO string to the corresponding metalearner class.
ALGO_LIST = {
    'maml': MAML,
    'fomaml': FOMAML,
    'reptile': Reptile,
    'minibatchprox': MinibatchProx,
    'anil': ANIL
}
def set_trainer():
    """Build and wire up a Trainer for the configured GBML algorithm."""
    # Omniglot N-way K-shot task splits (test split unused here).
    train_set, val_set, _ = get_benchmarks(
        name = 'omniglot',
        root = os.path.join(base_path, 'data'),
        n_way = N_WAY,
        k_shot = K_SHOT,
    )
    meta_train_loader = MetaDataLoader(train_set, batch_size=BATCH_SIZE, shuffle=False)
    meta_val_loader = MetaDataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False)

    net = OmniglotCNN(N_WAY)

    # ANIL adapts only the classifier head in the inner loop; every other
    # algorithm adapts the full network.
    if ALGO == 'anil':
        inner_optimizer = optim.SGD(net.classifier.parameters(), lr=INNER_LR)
    else:
        inner_optimizer = optim.SGD(net.parameters(), lr=INNER_LR)
    outer_optimizer = optim.Adam(net.parameters(), lr=OUTER_LR)

    learner = ALGO_LIST[ALGO](
        model = net,
        in_optim = inner_optimizer,
        out_optim = outer_optimizer,
        root = os.path.join(base_path, 'checkpoints'),
        inner_steps = INNER_STEPS
    )

    run_logger = Logger(
        root = os.path.join(base_path, 'logs'),
        n_iters_per_epoch = N_ITERS_PER_EPOCH,
        log_basename = learner.alg_name,
        verbose = True
    )

    return Trainer(
        metalearner = learner,
        train_loader = meta_train_loader,
        val_loader = meta_val_loader,
        n_epoches = N_EPOCHES,
        n_iters_per_epoch = N_ITERS_PER_EPOCH,
        n_iters_test = N_ITERS_TEST,
        logger = run_logger
    )
if __name__ == '__main__':
    # Build the trainer and start meta-training.
    set_trainer().run_train()
| 25.482759 | 86 | 0.664862 | import os
import sys
base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(base_path)
from torch import optim
from metallic.data.benchmarks import get_benchmarks
from metallic.data.dataloader import MetaDataLoader
from metallic.models import OmniglotCNN
from metallic.metalearners import FOMAML, MAML, Reptile, MinibatchProx, ANIL
from metallic.trainer import Trainer
from metallic.utils import Logger
ALGO = 'maml'
BATCH_SIZE = 16
N_WAY = 5
K_SHOT = 1
OUTER_LR = 0.001
INNER_LR = 0.4
INNER_STEPS = 1
N_EPOCHES = 100
N_ITERS_PER_EPOCH = 500
N_ITERS_TEST = 600
N_WORKERS = 5
ALGO_LIST = {
'maml': MAML,
'fomaml': FOMAML,
'reptile': Reptile,
'minibatchprox': MinibatchProx,
'anil': ANIL
}
def set_trainer():
train_dataset, val_dataset, _ = get_benchmarks(
name = 'omniglot',
root = os.path.join(base_path, 'data'),
n_way = N_WAY,
k_shot = K_SHOT,
)
train_loader = MetaDataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=False)
val_loader = MetaDataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False)
model = OmniglotCNN(N_WAY)
if ALGO == 'anil':
in_optim = optim.SGD(model.classifier.parameters(), lr=INNER_LR)
else:
in_optim = optim.SGD(model.parameters(), lr=INNER_LR)
out_optim = optim.Adam(model.parameters(), lr=OUTER_LR)
metalearner = ALGO_LIST[ALGO](
model = model,
in_optim = in_optim,
out_optim = out_optim,
root = os.path.join(base_path, 'checkpoints'),
inner_steps = INNER_STEPS
)
logger = Logger(
root = os.path.join(base_path, 'logs'),
n_iters_per_epoch = N_ITERS_PER_EPOCH,
log_basename = metalearner.alg_name,
verbose = True
)
trainer = Trainer(
metalearner = metalearner,
train_loader = train_loader,
val_loader = val_loader,
n_epoches = N_EPOCHES,
n_iters_per_epoch = N_ITERS_PER_EPOCH,
n_iters_test = N_ITERS_TEST,
logger = logger
)
return trainer
if __name__ == '__main__':
trainer = set_trainer()
trainer.run_train()
| true | true |
f7141ff71ab7ab9a6756e73b6c2e968a45ebb526 | 2,637 | py | Python | workshops/migrations/0049_auto_20150916_0544.py | tracykteal/amy | cb19e318d36b880b1c3be2104efff42ef776118a | [
"MIT"
] | 1 | 2015-04-03T20:26:56.000Z | 2015-04-03T20:26:56.000Z | workshops/migrations/0049_auto_20150916_0544.py | tracykteal/amy | cb19e318d36b880b1c3be2104efff42ef776118a | [
"MIT"
] | 1 | 2019-12-13T11:22:47.000Z | 2019-12-13T11:22:47.000Z | workshops/migrations/0049_auto_20150916_0544.py | tracykteal/amy | cb19e318d36b880b1c3be2104efff42ef776118a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: tweak choice lists, help texts and verbose names on
    Person.gender and several ProfileUpdateRequest fields."""

    dependencies = [
        ('workshops', '0048_auto_20150916_0441'),
    ]

    operations = [
        # Person.gender: reworded/reordered choices, default 'U' (undisclosed).
        migrations.AlterField(
            model_name='person',
            name='gender',
            field=models.CharField(max_length=1, default='U', choices=[('U', 'Prefer not to say (undisclosed)'), ('M', 'Male'), ('F', 'Female'), ('O', 'Other')]),
        ),
        migrations.AlterField(
            model_name='profileupdaterequest',
            name='airport_iata',
            field=models.CharField(help_text='Please use its 3-letter IATA code (<a href="http://www.airportcodes.aero/" target="_blank">http://www.airportcodes.aero/</a>) to tell us where you\'re located.', max_length=3, verbose_name='Nearest major airport'),
        ),
        migrations.AlterField(
            model_name='profileupdaterequest',
            name='email',
            field=models.EmailField(max_length=254, verbose_name='Email address'),
        ),
        migrations.AlterField(
            model_name='profileupdaterequest',
            name='gender',
            field=models.CharField(max_length=1, default='U', choices=[('U', 'Prefer not to say'), ('F', 'Female'), ('M', 'Male'), ('O', 'Other (enter below)')]),
        ),
        migrations.AlterField(
            model_name='profileupdaterequest',
            name='lessons',
            field=models.ManyToManyField(help_text='Please mark ALL that apply.', to='workshops.Lesson', verbose_name="Topic and lessons you're comfortable teaching"),
        ),
        migrations.AlterField(
            model_name='profileupdaterequest',
            name='occupation',
            field=models.CharField(blank=True, help_text='Please choose the one that best describes you.', choices=[('undisclosed', 'Prefer not to say'), ('undergrad', 'Undergraduate student'), ('grad', 'Graduate student'), ('postdoc', 'Post-doctoral researcher'), ('faculty', 'Faculty'), ('research', 'Research staff (including research programmer)'), ('support', 'Support staff (including technical support)'), ('librarian', 'Librarian/archivist'), ('commerce', 'Commercial software developer '), ('', 'Other (enter below)')], max_length=40, default='undisclosed', verbose_name='What is your current occupation/career stage?'),
        ),
        migrations.AlterField(
            model_name='profileupdaterequest',
            name='twitter',
            field=models.CharField(blank=True, max_length=100, default='', verbose_name='Twitter username'),
        ),
    ]
| 52.74 | 629 | 0.628366 |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('workshops', '0048_auto_20150916_0441'),
]
operations = [
migrations.AlterField(
model_name='person',
name='gender',
field=models.CharField(max_length=1, default='U', choices=[('U', 'Prefer not to say (undisclosed)'), ('M', 'Male'), ('F', 'Female'), ('O', 'Other')]),
),
migrations.AlterField(
model_name='profileupdaterequest',
name='airport_iata',
field=models.CharField(help_text='Please use its 3-letter IATA code (<a href="http://www.airportcodes.aero/" target="_blank">http://www.airportcodes.aero/</a>) to tell us where you\'re located.', max_length=3, verbose_name='Nearest major airport'),
),
migrations.AlterField(
model_name='profileupdaterequest',
name='email',
field=models.EmailField(max_length=254, verbose_name='Email address'),
),
migrations.AlterField(
model_name='profileupdaterequest',
name='gender',
field=models.CharField(max_length=1, default='U', choices=[('U', 'Prefer not to say'), ('F', 'Female'), ('M', 'Male'), ('O', 'Other (enter below)')]),
),
migrations.AlterField(
model_name='profileupdaterequest',
name='lessons',
field=models.ManyToManyField(help_text='Please mark ALL that apply.', to='workshops.Lesson', verbose_name="Topic and lessons you're comfortable teaching"),
),
migrations.AlterField(
model_name='profileupdaterequest',
name='occupation',
field=models.CharField(blank=True, help_text='Please choose the one that best describes you.', choices=[('undisclosed', 'Prefer not to say'), ('undergrad', 'Undergraduate student'), ('grad', 'Graduate student'), ('postdoc', 'Post-doctoral researcher'), ('faculty', 'Faculty'), ('research', 'Research staff (including research programmer)'), ('support', 'Support staff (including technical support)'), ('librarian', 'Librarian/archivist'), ('commerce', 'Commercial software developer '), ('', 'Other (enter below)')], max_length=40, default='undisclosed', verbose_name='What is your current occupation/career stage?'),
),
migrations.AlterField(
model_name='profileupdaterequest',
name='twitter',
field=models.CharField(blank=True, max_length=100, default='', verbose_name='Twitter username'),
),
]
| true | true |
f714200e0fceaf12937e196c4653e7f5a945e815 | 2,657 | bzl | Python | pkg/tests/path_test.bzl | hborawski/rules_pkg | 8d542763a3959db79175404758f46c7f3f385fa5 | [
"Apache-2.0"
] | null | null | null | pkg/tests/path_test.bzl | hborawski/rules_pkg | 8d542763a3959db79175404758f46c7f3f385fa5 | [
"Apache-2.0"
] | null | null | null | pkg/tests/path_test.bzl | hborawski/rules_pkg | 8d542763a3959db79175404758f46c7f3f385fa5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for path.bzl"""
load("@bazel_skylib//lib:unittest.bzl", "analysistest", "asserts", "unittest")
load("//:mappings.bzl", "pkg_mkdirs")
load("//:path.bzl", "compute_data_path")
##########
# Test compute_data_path
##########
def _compute_data_path_test_impl(ctx):
    """Analysis-test impl: asserts compute_data_path() yields expected_path."""
    env = analysistest.begin(ctx)
    target_under_test = analysistest.target_under_test(env)
    # Subtle: This allows you to vendor the library into your own repo at some
    # arbitrary path: an expected path under 'tests' is rebased onto the
    # package this test file actually lives in.
    expect = ctx.attr.expected_path
    if expect.startswith('tests'):
        expect = ctx.label.package + expect[5:]
    asserts.equals(
        env,
        expect,
        compute_data_path(ctx, ctx.attr.in_path),
    )
    return analysistest.end(env)
# Analysis-test rule: feeds an input path and the path it is expected to
# normalize to into _compute_data_path_test_impl.
compute_data_path_test = analysistest.make(
    _compute_data_path_test_impl,
    attrs = {
        "in_path": attr.string(mandatory = True),
        "expected_path": attr.string(mandatory = True),
    },
)
def _test_compute_data_path(name):
    """Declare a dummy target plus one analysis test per path case."""
    pkg_mkdirs(
        name = "dummy",
        dirs = [],
        tags = ["manual"],
    )

    # (test suffix, input path, expected normalized path)
    cases = [
        ("normal", "a/b/c", "tests/a/b/c"),
        ("absolute", "/a/b/c", "a/b/c"),
        ("relative", "./a/b/c", "tests/a/b/c"),
        ("empty", "./", "tests"),
        ("empty2", "./.", "tests"),
    ]
    for suffix, in_path, expected in cases:
        compute_data_path_test(
            name = "{}_{}_test".format(name, suffix),
            target_under_test = ":dummy",
            in_path = in_path,
            expected_path = expected,
        )
def path_tests(name):
    """Declare path.bzl analysis tests.

    name: prefix used for all generated test target names.
    """
    _test_compute_data_path(name=name + "_compute_data_path")
| 28.880435 | 78 | 0.637938 |
load("@bazel_skylib//lib:unittest.bzl", "analysistest", "asserts", "unittest")
load("//:mappings.bzl", "pkg_mkdirs")
load("//:path.bzl", "compute_data_path")
er_test = analysistest.target_under_test(env)
expect = ctx.attr.expected_path
if expect.startswith('tests'):
expect = ctx.label.package + expect[5:]
asserts.equals(
env,
expect,
compute_data_path(ctx, ctx.attr.in_path),
)
return analysistest.end(env)
compute_data_path_test = analysistest.make(
_compute_data_path_test_impl,
attrs = {
"in_path": attr.string(mandatory = True),
"expected_path": attr.string(mandatory = True),
},
)
def _test_compute_data_path(name):
pkg_mkdirs(
name = "dummy",
dirs = [],
tags = ["manual"],
)
compute_data_path_test(
name = name + "_normal_test",
target_under_test = ":dummy",
in_path = "a/b/c",
expected_path = "tests/a/b/c",
)
compute_data_path_test(
name = name + "_absolute_test",
target_under_test = ":dummy",
in_path = "/a/b/c",
expected_path = "a/b/c",
)
compute_data_path_test(
name = name + "_relative_test",
target_under_test = ":dummy",
in_path = "./a/b/c",
expected_path = "tests/a/b/c",
)
compute_data_path_test(
name = name + "_empty_test",
target_under_test = ":dummy",
in_path = "./",
expected_path = "tests",
)
compute_data_path_test(
name = name + "_empty2_test",
target_under_test = ":dummy",
in_path = "./.",
expected_path = "tests",
)
def path_tests(name):
_test_compute_data_path(name=name + "_compute_data_path")
| true | true |
f71421b82991b2284752df5cbc3abe620e97baaf | 8,509 | py | Python | tests/test_version.py | dfroger/conda | c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa | [
"BSD-3-Clause"
] | null | null | null | tests/test_version.py | dfroger/conda | c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa | [
"BSD-3-Clause"
] | null | null | null | tests/test_version.py | dfroger/conda | c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function, absolute_import
import unittest
from conda.version import ver_eval, VersionSpec, VersionOrder, normalized_version
class TestVersionSpec(unittest.TestCase):
def test_version_order(self):
    """Exercise VersionOrder parsing, equality and total ordering.

    ``versions`` pairs each version string with its expected parsed form:
    a list of components, the first being the epoch.  The list itself is
    written in ascending order so it doubles as the __lt__ fixture below.
    """
    versions = [
        (VersionOrder("0.4"), [[0], [0], [4]]),
        (VersionOrder("0.4.0"), [[0], [0], [4], [0]]),
        (VersionOrder("0.4.1a.vc11"),[[0], [0], [4], [1, 'a'],[0, 'vc', 11]]),
        (VersionOrder("0.4.1.rc"), [[0], [0], [4], [1], [0, 'rc']]),
        (VersionOrder("0.4.1.vc11"), [[0], [0], [4], [1],[0, 'vc', 11]]),
        (VersionOrder("0.4.1"), [[0], [0], [4], [1]]),
        (VersionOrder("0.5*"), [[0], [0], [5, '*']]),
        (VersionOrder("0.5a1"), [[0], [0], [5, 'a', 1]]),
        (VersionOrder("0.5b3"), [[0], [0], [5, 'b', 3]]),
        (VersionOrder("0.5C1"), [[0], [0], [5, 'c', 1]]),
        (VersionOrder("0.5z"), [[0], [0], [5, 'z']]),
        (VersionOrder("0.5za"), [[0], [0], [5, 'za']]),
        (VersionOrder("0.5"), [[0], [0], [5]]),
        (VersionOrder("0.9.6"), [[0], [0], [9], [6]]),
        (VersionOrder("0.960923"), [[0], [0], [960923]]),
        (VersionOrder("1.0"), [[0], [1], [0]]),
        (VersionOrder("1.0.4a3"), [[0], [1], [0], [4, 'a', 3]]),
        (VersionOrder("1.0.4b1"), [[0], [1], [0], [4, 'b', 1]]),
        (VersionOrder("1.0.4"), [[0], [1], [0], [4]]),
        (VersionOrder("1.1dev1"), [[0], [1], [1, 'DEV', 1]]),
        (VersionOrder("1.1a1"), [[0], [1], [1, 'a', 1]]),
        (VersionOrder("1.1.dev1"), [[0], [1], [1], [0, 'DEV', 1]]),
        (VersionOrder("1.1.a1"), [[0], [1], [1], [0, 'a', 1]]),
        (VersionOrder("1.1"), [[0], [1], [1]]),
        (VersionOrder("1.1.post1"), [[0], [1], [1], [0, float('inf'), 1]]),
        (VersionOrder("1.1.1dev1"), [[0], [1], [1], [1, 'DEV', 1]]),
        (VersionOrder("1.1.1rc1"), [[0], [1], [1], [1, 'rc', 1]]),
        (VersionOrder("1.1.1"), [[0], [1], [1], [1]]),
        (VersionOrder("1.1.1post1"), [[0], [1], [1], [1, float('inf'), 1]]),
        (VersionOrder("1.1post1"), [[0], [1], [1, float('inf'), 1]]),
        (VersionOrder("2g6"), [[0], [2, 'g', 6]]),
        (VersionOrder("2.0b1pr0"), [[0], [2], [0, 'b', 1, 'pr', 0]]),
        (VersionOrder("2.2be.ta29"), [[0], [2], [2, 'be'], [0, 'ta', 29]]),
        (VersionOrder("2.2be5ta29"), [[0], [2], [2, 'be', 5, 'ta', 29]]),
        (VersionOrder("2.2beta29"), [[0], [2], [2, 'beta', 29]]),
        (VersionOrder("2.2.0.1"), [[0], [2], [2],[0],[1]]),
        (VersionOrder("3.1.1.6"), [[0], [3], [1], [1], [6]]),
        (VersionOrder("3.2.p.r0"), [[0], [3], [2], [0, 'p'], [0, 'r', 0]]),
        (VersionOrder("3.2.pr0"), [[0], [3], [2], [0, 'pr', 0]]),
        (VersionOrder("3.2.pr.1"), [[0], [3], [2], [0, 'pr'], [1]]),
        (VersionOrder("5.5.kw"), [[0], [5], [5], [0, 'kw']]),
        (VersionOrder("11g"), [[0], [11, 'g']]),
        (VersionOrder("14.3.1"), [[0], [14], [3], [1]]),
        (VersionOrder("14.3.1.post26.g9d75ca2"),
         [[0],[14],[3],[1],[0,float('inf'),26],[0,'g',9,'d',75,'ca',2]]),
        (VersionOrder("1996.07.12"), [[0], [1996], [7], [12]]),
        (VersionOrder("1!0.4.1"), [[1], [0], [4], [1]]),
        (VersionOrder("1!3.1.1.6"), [[1], [3], [1], [1], [6]]),
        (VersionOrder("2!0.4.1"), [[2], [0], [4], [1]]),
    ]

    # check parser: each string parses to its expected component lists;
    # parsing is case-insensitive and tolerant of surrounding whitespace.
    for v, l in versions:
        self.assertEqual(v.version, l)
    self.assertEqual(VersionOrder("0.4.1.rc"), VersionOrder("  0.4.1.RC  "))
    self.assertEqual(normalized_version("  0.4.1.RC  "), VersionOrder("0.4.1.rc"))
    # Malformed strings (empty, stray operators, empty components, bad
    # epoch) must be rejected.
    with self.assertRaises(ValueError):
        VersionOrder("")
    with self.assertRaises(ValueError):
        VersionOrder("  ")
    with self.assertRaises(ValueError):
        VersionOrder("5.5++")
    with self.assertRaises(ValueError):
        VersionOrder("5.5..mw")
    with self.assertRaises(ValueError):
        VersionOrder("5.5.mw.")
    with self.assertRaises(ValueError):
        VersionOrder("!")
    with self.assertRaises(ValueError):
        VersionOrder("a!1.0")

    # check __eq__: trailing zeros and leading-zero components compare equal.
    self.assertEqual(VersionOrder("  0.4.rc  "), VersionOrder("0.4.RC"))
    self.assertEqual(VersionOrder("0.4"), VersionOrder("0.4.0"))
    self.assertNotEqual(VersionOrder("0.4"), VersionOrder("0.4.1"))
    self.assertEqual(VersionOrder("0.4.a1"), VersionOrder("0.4.0a1"))
    self.assertNotEqual(VersionOrder("0.4.a1"), VersionOrder("0.4.1a1"))

    # check __lt__: the fixture list above is already in ascending order.
    self.assertEqual(sorted(versions, key=lambda x: x[0]), versions)

    # test openssl convention: letter suffixes sort after the bare release.
    openssl = [VersionOrder(k) for k in ['1.0.1', '1.0.1post.a', '1.0.1post.b',
                                         '1.0.1post.z', '1.0.1post.za', '1.0.2']]
    self.assertEqual(sorted(openssl), openssl)
def test_pep440(self):
# this list must be in sorted order (slightly modified from the PEP 440 test suite
# https://github.com/pypa/packaging/blob/master/tests/test_version.py)
VERSIONS = [
# Implicit epoch of 0
"1.0a1", "1.0a2.dev456", "1.0a12.dev456", "1.0a12",
"1.0b1.dev456", "1.0b2", "1.0b2.post345.dev456", "1.0b2.post345",
"1.0c1.dev456", "1.0c1", "1.0c3", "1.0rc2", "1.0.dev456", "1.0",
"1.0.post456.dev34", "1.0.post456", "1.1.dev1",
"1.2.r32+123456", "1.2.rev33+123456",
"1.2+abc", "1.2+abc123def", "1.2+abc123",
"1.2+123abc", "1.2+123abc456", "1.2+1234.abc", "1.2+123456",
# Explicit epoch of 1
"1!1.0a1", "1!1.0a2.dev456", "1!1.0a12.dev456", "1!1.0a12",
"1!1.0b1.dev456", "1!1.0b2", "1!1.0b2.post345.dev456", "1!1.0b2.post345",
"1!1.0c1.dev456", "1!1.0c1", "1!1.0c3", "1!1.0rc2", "1!1.0.dev456", "1!1.0",
"1!1.0.post456.dev34", "1!1.0.post456", "1!1.1.dev1",
"1!1.2.r32+123456", "1!1.2.rev33+123456",
"1!1.2+abc", "1!1.2+abc123def", "1!1.2+abc123",
"1!1.2+123abc", "1!1.2+123abc456", "1!1.2+1234.abc", "1!1.2+123456",
]
version = [VersionOrder(v) for v in VERSIONS]
self.assertEqual(version, sorted(version))
def test_hexrd(self):
VERSIONS = ['0.3.0.dev', '0.3.3']
vos = [VersionOrder(v) for v in VERSIONS]
self.assertEqual(sorted(vos), vos)
def test_ver_eval(self):
self.assertEqual(ver_eval('1.7.0', '==1.7'), True)
self.assertEqual(ver_eval('1.7.0', '<=1.7'), True)
self.assertEqual(ver_eval('1.7.0', '<1.7'), False)
self.assertEqual(ver_eval('1.7.0', '>=1.7'), True)
self.assertEqual(ver_eval('1.7.0', '>1.7'), False)
self.assertEqual(ver_eval('1.6.7', '>=1.7'), False)
self.assertEqual(ver_eval('2013a', '>2013b'), False)
self.assertEqual(ver_eval('2013k', '>2013b'), True)
self.assertEqual(ver_eval('3.0.0', '>2013b'), False)
self.assertEqual(ver_eval('1.0.0', '>1.0.0a'), True)
self.assertEqual(ver_eval('1.0.0', '>1.0.0*'), True)
def test_ver_eval_errors(self):
self.assertRaises(RuntimeError, ver_eval, '3.0.0', '><2.4.5')
self.assertRaises(RuntimeError, ver_eval, '3.0.0', '!!2.4.5')
self.assertRaises(RuntimeError, ver_eval, '3.0.0', '!')
def test_match(self):
for vspec, res in [
('1.7*', True), ('1.7.1', True), ('1.7.0', False),
('1.7', False), ('1.5*', False), ('>=1.5', True),
('!=1.5', True), ('!=1.7.1', False), ('==1.7.1', True),
('==1.7', False), ('==1.7.2', False), ('==1.7.1.0', True),
]:
m = VersionSpec(vspec)
self.assertEqual(m.match('1.7.1'), res)
def test_local_identifier(self):
"""The separator for the local identifier should be either `.` or `+`"""
# a valid versionstr should match itself
versions = (
'1.7.0'
'1.7.0.post123'
'1.7.0.post123.gabcdef9',
'1.7.0.post123+gabcdef9',
)
for version in versions:
m = VersionSpec(version)
self.assertTrue(m.match(version))
| 50.349112 | 104 | 0.466212 | from __future__ import print_function, absolute_import
import unittest
from conda.version import ver_eval, VersionSpec, VersionOrder, normalized_version
class TestVersionSpec(unittest.TestCase):
def test_version_order(self):
versions = [
(VersionOrder("0.4"), [[0], [0], [4]]),
(VersionOrder("0.4.0"), [[0], [0], [4], [0]]),
(VersionOrder("0.4.1a.vc11"),[[0], [0], [4], [1, 'a'],[0, 'vc', 11]]),
(VersionOrder("0.4.1.rc"), [[0], [0], [4], [1], [0, 'rc']]),
(VersionOrder("0.4.1.vc11"), [[0], [0], [4], [1],[0, 'vc', 11]]),
(VersionOrder("0.4.1"), [[0], [0], [4], [1]]),
(VersionOrder("0.5*"), [[0], [0], [5, '*']]),
(VersionOrder("0.5a1"), [[0], [0], [5, 'a', 1]]),
(VersionOrder("0.5b3"), [[0], [0], [5, 'b', 3]]),
(VersionOrder("0.5C1"), [[0], [0], [5, 'c', 1]]),
(VersionOrder("0.5z"), [[0], [0], [5, 'z']]),
(VersionOrder("0.5za"), [[0], [0], [5, 'za']]),
(VersionOrder("0.5"), [[0], [0], [5]]),
(VersionOrder("0.9.6"), [[0], [0], [9], [6]]),
(VersionOrder("0.960923"), [[0], [0], [960923]]),
(VersionOrder("1.0"), [[0], [1], [0]]),
(VersionOrder("1.0.4a3"), [[0], [1], [0], [4, 'a', 3]]),
(VersionOrder("1.0.4b1"), [[0], [1], [0], [4, 'b', 1]]),
(VersionOrder("1.0.4"), [[0], [1], [0], [4]]),
(VersionOrder("1.1dev1"), [[0], [1], [1, 'DEV', 1]]),
(VersionOrder("1.1a1"), [[0], [1], [1, 'a', 1]]),
(VersionOrder("1.1.dev1"), [[0], [1], [1], [0, 'DEV', 1]]),
(VersionOrder("1.1.a1"), [[0], [1], [1], [0, 'a', 1]]),
(VersionOrder("1.1"), [[0], [1], [1]]),
(VersionOrder("1.1.post1"), [[0], [1], [1], [0, float('inf'), 1]]),
(VersionOrder("1.1.1dev1"), [[0], [1], [1], [1, 'DEV', 1]]),
(VersionOrder("1.1.1rc1"), [[0], [1], [1], [1, 'rc', 1]]),
(VersionOrder("1.1.1"), [[0], [1], [1], [1]]),
(VersionOrder("1.1.1post1"), [[0], [1], [1], [1, float('inf'), 1]]),
(VersionOrder("1.1post1"), [[0], [1], [1, float('inf'), 1]]),
(VersionOrder("2g6"), [[0], [2, 'g', 6]]),
(VersionOrder("2.0b1pr0"), [[0], [2], [0, 'b', 1, 'pr', 0]]),
(VersionOrder("2.2be.ta29"), [[0], [2], [2, 'be'], [0, 'ta', 29]]),
(VersionOrder("2.2be5ta29"), [[0], [2], [2, 'be', 5, 'ta', 29]]),
(VersionOrder("2.2beta29"), [[0], [2], [2, 'beta', 29]]),
(VersionOrder("2.2.0.1"), [[0], [2], [2],[0],[1]]),
(VersionOrder("3.1.1.6"), [[0], [3], [1], [1], [6]]),
(VersionOrder("3.2.p.r0"), [[0], [3], [2], [0, 'p'], [0, 'r', 0]]),
(VersionOrder("3.2.pr0"), [[0], [3], [2], [0, 'pr', 0]]),
(VersionOrder("3.2.pr.1"), [[0], [3], [2], [0, 'pr'], [1]]),
(VersionOrder("5.5.kw"), [[0], [5], [5], [0, 'kw']]),
(VersionOrder("11g"), [[0], [11, 'g']]),
(VersionOrder("14.3.1"), [[0], [14], [3], [1]]),
(VersionOrder("14.3.1.post26.g9d75ca2"),
[[0],[14],[3],[1],[0,float('inf'),26],[0,'g',9,'d',75,'ca',2]]),
(VersionOrder("1996.07.12"), [[0], [1996], [7], [12]]),
(VersionOrder("1!0.4.1"), [[1], [0], [4], [1]]),
(VersionOrder("1!3.1.1.6"), [[1], [3], [1], [1], [6]]),
(VersionOrder("2!0.4.1"), [[2], [0], [4], [1]]),
]
for v, l in versions:
self.assertEqual(v.version, l)
self.assertEqual(VersionOrder("0.4.1.rc"), VersionOrder(" 0.4.1.RC "))
self.assertEqual(normalized_version(" 0.4.1.RC "), VersionOrder("0.4.1.rc"))
with self.assertRaises(ValueError):
VersionOrder("")
with self.assertRaises(ValueError):
VersionOrder(" ")
with self.assertRaises(ValueError):
VersionOrder("5.5++")
with self.assertRaises(ValueError):
VersionOrder("5.5..mw")
with self.assertRaises(ValueError):
VersionOrder("5.5.mw.")
with self.assertRaises(ValueError):
VersionOrder("!")
with self.assertRaises(ValueError):
VersionOrder("a!1.0")
self.assertEqual(VersionOrder(" 0.4.rc "), VersionOrder("0.4.RC"))
self.assertEqual(VersionOrder("0.4"), VersionOrder("0.4.0"))
self.assertNotEqual(VersionOrder("0.4"), VersionOrder("0.4.1"))
self.assertEqual(VersionOrder("0.4.a1"), VersionOrder("0.4.0a1"))
self.assertNotEqual(VersionOrder("0.4.a1"), VersionOrder("0.4.1a1"))
self.assertEqual(sorted(versions, key=lambda x: x[0]), versions)
openssl = [VersionOrder(k) for k in ['1.0.1', '1.0.1post.a', '1.0.1post.b',
'1.0.1post.z', '1.0.1post.za', '1.0.2']]
self.assertEqual(sorted(openssl), openssl)
def test_pep440(self):
VERSIONS = [
"1.0a1", "1.0a2.dev456", "1.0a12.dev456", "1.0a12",
"1.0b1.dev456", "1.0b2", "1.0b2.post345.dev456", "1.0b2.post345",
"1.0c1.dev456", "1.0c1", "1.0c3", "1.0rc2", "1.0.dev456", "1.0",
"1.0.post456.dev34", "1.0.post456", "1.1.dev1",
"1.2.r32+123456", "1.2.rev33+123456",
"1.2+abc", "1.2+abc123def", "1.2+abc123",
"1.2+123abc", "1.2+123abc456", "1.2+1234.abc", "1.2+123456",
"1!1.0a1", "1!1.0a2.dev456", "1!1.0a12.dev456", "1!1.0a12",
"1!1.0b1.dev456", "1!1.0b2", "1!1.0b2.post345.dev456", "1!1.0b2.post345",
"1!1.0c1.dev456", "1!1.0c1", "1!1.0c3", "1!1.0rc2", "1!1.0.dev456", "1!1.0",
"1!1.0.post456.dev34", "1!1.0.post456", "1!1.1.dev1",
"1!1.2.r32+123456", "1!1.2.rev33+123456",
"1!1.2+abc", "1!1.2+abc123def", "1!1.2+abc123",
"1!1.2+123abc", "1!1.2+123abc456", "1!1.2+1234.abc", "1!1.2+123456",
]
version = [VersionOrder(v) for v in VERSIONS]
self.assertEqual(version, sorted(version))
def test_hexrd(self):
VERSIONS = ['0.3.0.dev', '0.3.3']
vos = [VersionOrder(v) for v in VERSIONS]
self.assertEqual(sorted(vos), vos)
def test_ver_eval(self):
self.assertEqual(ver_eval('1.7.0', '==1.7'), True)
self.assertEqual(ver_eval('1.7.0', '<=1.7'), True)
self.assertEqual(ver_eval('1.7.0', '<1.7'), False)
self.assertEqual(ver_eval('1.7.0', '>=1.7'), True)
self.assertEqual(ver_eval('1.7.0', '>1.7'), False)
self.assertEqual(ver_eval('1.6.7', '>=1.7'), False)
self.assertEqual(ver_eval('2013a', '>2013b'), False)
self.assertEqual(ver_eval('2013k', '>2013b'), True)
self.assertEqual(ver_eval('3.0.0', '>2013b'), False)
self.assertEqual(ver_eval('1.0.0', '>1.0.0a'), True)
self.assertEqual(ver_eval('1.0.0', '>1.0.0*'), True)
def test_ver_eval_errors(self):
self.assertRaises(RuntimeError, ver_eval, '3.0.0', '><2.4.5')
self.assertRaises(RuntimeError, ver_eval, '3.0.0', '!!2.4.5')
self.assertRaises(RuntimeError, ver_eval, '3.0.0', '!')
def test_match(self):
for vspec, res in [
('1.7*', True), ('1.7.1', True), ('1.7.0', False),
('1.7', False), ('1.5*', False), ('>=1.5', True),
('!=1.5', True), ('!=1.7.1', False), ('==1.7.1', True),
('==1.7', False), ('==1.7.2', False), ('==1.7.1.0', True),
]:
m = VersionSpec(vspec)
self.assertEqual(m.match('1.7.1'), res)
def test_local_identifier(self):
versions = (
'1.7.0'
'1.7.0.post123'
'1.7.0.post123.gabcdef9',
'1.7.0.post123+gabcdef9',
)
for version in versions:
m = VersionSpec(version)
self.assertTrue(m.match(version))
| true | true |
f7142361616cc1d896aba889094e5bdfd2013e97 | 178 | py | Python | frederic/hello_world.py | infelane/python-for-java-devs | 56f313f89ad8603598f879f31e0d9a35795e50e3 | [
"Apache-2.0"
] | 1 | 2019-10-20T16:05:30.000Z | 2019-10-20T16:05:30.000Z | frederic/hello_world.py | infelane/python-for-java-devs | 56f313f89ad8603598f879f31e0d9a35795e50e3 | [
"Apache-2.0"
] | 1 | 2020-07-10T09:09:58.000Z | 2020-07-10T09:09:58.000Z | frederic/hello_world.py | infelane/python-for-java-devs | 56f313f89ad8603598f879f31e0d9a35795e50e3 | [
"Apache-2.0"
] | 3 | 2020-07-10T07:46:51.000Z | 2022-02-21T08:58:45.000Z | import logging
def say(n):
logging.basicConfig(level=logging.DEBUG)
for i in range(n):
logging.info(str(i) + ": Hello world")
say(1)
if __name__=="__main__":
say(3)
| 14.833333 | 42 | 0.662921 | import logging
def say(n):
logging.basicConfig(level=logging.DEBUG)
for i in range(n):
logging.info(str(i) + ": Hello world")
say(1)
if __name__=="__main__":
say(3)
| true | true |
f71423f13ea7f981319138912b683e99f21321aa | 3,285 | py | Python | invenio_assets/npm.py | pazembrz/invenio-assets | dd9acd8f0bf1a10eb2593949d9f1b6cc6d95ef43 | [
"MIT"
] | 1 | 2018-10-24T11:17:30.000Z | 2018-10-24T11:17:30.000Z | invenio_assets/npm.py | okraskaj/invenio-assets | e0f48743982d16cffa312fa9fe392c62db906d89 | [
"MIT"
] | null | null | null | invenio_assets/npm.py | okraskaj/invenio-assets | e0f48743982d16cffa312fa9fe392c62db906d89 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Bundle class with support for npm dependencies."""
from __future__ import absolute_import, print_function
from collections import defaultdict
import semver
from flask_assets import Bundle as BundleBase
from pkg_resources import parse_version
from speaklater import is_lazy_string
__all__ = ('LazyNpmBundle', 'NpmBundle', 'extract_deps', 'make_semver', )
class NpmBundle(BundleBase):
"""Bundle extension with a name and npm dependencies.
The npm dependencies are used to generate a package.json file.
"""
def __init__(self, *contents, **options):
"""Initialize the named bundle.
:param name: name of the bundle
:type name: str
:param npm: npm dependencies
:type npm: dict
"""
self.npm = options.pop('npm', {})
super(NpmBundle, self).__init__(*contents, **options)
class LazyNpmBundle(NpmBundle):
"""Magically evaluate lazy strings as file names."""
def _get_contents(self):
"""Create strings from lazy strings."""
return [
str(value) if is_lazy_string(value) else value
for value in super(LazyNpmBundle, self)._get_contents()
]
contents = property(_get_contents, NpmBundle._set_contents)
def extract_deps(bundles, log=None):
"""Extract the dependencies from the bundle and its sub-bundles."""
def _flatten(bundle):
deps = []
if hasattr(bundle, 'npm'):
deps.append(bundle.npm)
for content in bundle.contents:
if isinstance(content, BundleBase):
deps.extend(_flatten(content))
return deps
flatten_deps = []
for bundle in bundles:
flatten_deps.extend(_flatten(bundle))
packages = defaultdict(list)
for dep in flatten_deps:
for pkg, version in dep.items():
packages[pkg].append(version)
deps = {}
for package, versions in packages.items():
deps[package] = semver.max_satisfying(versions, '*', True)
if log and len(versions) > 1:
log('Warn: {0} version {1} resolved to: {2}'.format(
repr(package), versions, repr(deps[package])
))
return deps
def make_semver(version_str):
"""Make a semantic version from Python PEP440 version.
Semantic versions does not handle post-releases.
"""
v = parse_version(version_str)
major = v._version.release[0]
try:
minor = v._version.release[1]
except IndexError:
minor = 0
try:
patch = v._version.release[2]
except IndexError:
patch = 0
prerelease = []
if v._version.pre:
prerelease.append(''.join(str(x) for x in v._version.pre))
if v._version.dev:
prerelease.append(''.join(str(x) for x in v._version.dev))
prerelease = '.'.join(prerelease)
# Create semver
version = '{0}.{1}.{2}'.format(major, minor, patch)
if prerelease:
version += '-{0}'.format(prerelease)
if v.local:
version += '+{0}'.format(v.local)
return version
| 27.605042 | 73 | 0.635616 |
from __future__ import absolute_import, print_function
from collections import defaultdict
import semver
from flask_assets import Bundle as BundleBase
from pkg_resources import parse_version
from speaklater import is_lazy_string
__all__ = ('LazyNpmBundle', 'NpmBundle', 'extract_deps', 'make_semver', )
class NpmBundle(BundleBase):
def __init__(self, *contents, **options):
self.npm = options.pop('npm', {})
super(NpmBundle, self).__init__(*contents, **options)
class LazyNpmBundle(NpmBundle):
def _get_contents(self):
return [
str(value) if is_lazy_string(value) else value
for value in super(LazyNpmBundle, self)._get_contents()
]
contents = property(_get_contents, NpmBundle._set_contents)
def extract_deps(bundles, log=None):
def _flatten(bundle):
deps = []
if hasattr(bundle, 'npm'):
deps.append(bundle.npm)
for content in bundle.contents:
if isinstance(content, BundleBase):
deps.extend(_flatten(content))
return deps
flatten_deps = []
for bundle in bundles:
flatten_deps.extend(_flatten(bundle))
packages = defaultdict(list)
for dep in flatten_deps:
for pkg, version in dep.items():
packages[pkg].append(version)
deps = {}
for package, versions in packages.items():
deps[package] = semver.max_satisfying(versions, '*', True)
if log and len(versions) > 1:
log('Warn: {0} version {1} resolved to: {2}'.format(
repr(package), versions, repr(deps[package])
))
return deps
def make_semver(version_str):
v = parse_version(version_str)
major = v._version.release[0]
try:
minor = v._version.release[1]
except IndexError:
minor = 0
try:
patch = v._version.release[2]
except IndexError:
patch = 0
prerelease = []
if v._version.pre:
prerelease.append(''.join(str(x) for x in v._version.pre))
if v._version.dev:
prerelease.append(''.join(str(x) for x in v._version.dev))
prerelease = '.'.join(prerelease)
version = '{0}.{1}.{2}'.format(major, minor, patch)
if prerelease:
version += '-{0}'.format(prerelease)
if v.local:
version += '+{0}'.format(v.local)
return version
| true | true |
f7142447c41c8ccca6568a1c6a262abb22071efc | 2,371 | py | Python | geometry/matrix.py | GuillaumeRochette/HumanViewSynthesis | d65ea8744e284ec956bbc04f294f05e47731360f | [
"Apache-2.0"
] | 10 | 2021-11-30T05:56:10.000Z | 2022-03-16T07:06:16.000Z | geometry/matrix.py | GuillaumeRochette/Reconstruction3D | 358d9cb55486ad0f81a31df8ab4159153765e7e5 | [
"Apache-2.0"
] | 1 | 2022-01-10T09:27:22.000Z | 2022-01-10T09:27:22.000Z | geometry/matrix.py | GuillaumeRochette/Reconstruction3D | 358d9cb55486ad0f81a31df8ab4159153765e7e5 | [
"Apache-2.0"
] | null | null | null | from typing import Tuple
import torch
from torch import Tensor
def homogeneous(A: Tensor, b: Tensor) -> Tensor:
"""
Converts heterogeneous matrix into homogeneous matrix.
:param A: Heterogeneous matrix of shape [*, N, N].
:param b: Heterogeneous vector of shape [*, N, 1].
:return: Homogeneous matrix of shape [*, N + 1, N + 1].
"""
assert A.shape[:-2] == b.shape[:-2]
assert A.shape[-2] == A.shape[-1] == b.shape[-2]
assert b.shape[-1] == 1
s, n = A.shape[:-2], A.shape[-2]
c = torch.zeros(s + (1, n), dtype=A.dtype, device=A.device)
d = torch.ones(s + (1, 1), dtype=A.dtype, device=A.device)
M = torch.cat(
[
torch.cat([A, b], dim=-1),
torch.cat([c, d], dim=-1),
],
dim=-2,
)
return M
def heterogeneous(M: Tensor) -> Tuple[Tensor, Tensor]:
"""
Converts homogeneous matrix into heterogeneous matrix.
:param M: Homogeneous matrix of shape [*, N + 1, N + 1].
:return: Heterogeneous matrix and vector of shapes [*, N, N] and [*, N, 1] respectively.
"""
assert M.shape[-2] == M.shape[-1]
n = M.shape[-2] - 1
Ab, cd = M.split([n, 1], dim=-2)
A, b = Ab.split([n, 1], dim=-1)
c, d = cd.split([n, 1], dim=-1)
A, b = A / d, b / d
return A, b
def affine(x: Tensor, A: Tensor, b: Tensor) -> Tensor:
"""
Applies an affine transformation to x given A and b.
:param x: Vector of shape [*, N, 1].
:param A: Matrix of shape [*, N, N].
:param b: Vector of shape [*, N, 1].
:return: Vector of shape [*, N, 1].
"""
assert x.ndim == A.ndim == b.ndim
assert x.shape[-2] == A.shape[-2] == A.shape[-1] == b.shape[-2]
assert x.shape[-1] == b.shape[-1] == 1
y = A @ x + b
return y
def eye_like(x: Tensor) -> Tensor:
"""
Return an identity matrix of the same shape as x.
:param x: Matrix of shape [*, M, N].
:return: Identity matrix of shape [*, M, N].
"""
m, n = x.shape[-2], x.shape[-1]
return torch.eye(m, n, dtype=x.dtype, device=x.device).expand_as(x)
def diag(x: Tensor):
"""
Returns a diagonal matrix given a vector.
:param x: Vector of shape [*, M, 1].
:return: Diagonal matrix of shape [*, M, M].
"""
assert x.shape[-1] == 1
m = x.shape[-2]
return torch.eye(m, dtype=x.dtype, device=x.device) * x
| 25.223404 | 92 | 0.547027 | from typing import Tuple
import torch
from torch import Tensor
def homogeneous(A: Tensor, b: Tensor) -> Tensor:
assert A.shape[:-2] == b.shape[:-2]
assert A.shape[-2] == A.shape[-1] == b.shape[-2]
assert b.shape[-1] == 1
s, n = A.shape[:-2], A.shape[-2]
c = torch.zeros(s + (1, n), dtype=A.dtype, device=A.device)
d = torch.ones(s + (1, 1), dtype=A.dtype, device=A.device)
M = torch.cat(
[
torch.cat([A, b], dim=-1),
torch.cat([c, d], dim=-1),
],
dim=-2,
)
return M
def heterogeneous(M: Tensor) -> Tuple[Tensor, Tensor]:
assert M.shape[-2] == M.shape[-1]
n = M.shape[-2] - 1
Ab, cd = M.split([n, 1], dim=-2)
A, b = Ab.split([n, 1], dim=-1)
c, d = cd.split([n, 1], dim=-1)
A, b = A / d, b / d
return A, b
def affine(x: Tensor, A: Tensor, b: Tensor) -> Tensor:
assert x.ndim == A.ndim == b.ndim
assert x.shape[-2] == A.shape[-2] == A.shape[-1] == b.shape[-2]
assert x.shape[-1] == b.shape[-1] == 1
y = A @ x + b
return y
def eye_like(x: Tensor) -> Tensor:
m, n = x.shape[-2], x.shape[-1]
return torch.eye(m, n, dtype=x.dtype, device=x.device).expand_as(x)
def diag(x: Tensor):
assert x.shape[-1] == 1
m = x.shape[-2]
return torch.eye(m, dtype=x.dtype, device=x.device) * x
| true | true |
f714244989fbad4476876ccb83e7dfefdbb4a895 | 1,275 | py | Python | hardware/ci/build.py | Axford/AFRo | e57369c2d37e6001587cd37307d36c5f9f112c53 | [
"MIT"
] | null | null | null | hardware/ci/build.py | Axford/AFRo | e57369c2d37e6001587cd37307d36c5f9f112c53 | [
"MIT"
] | null | null | null | hardware/ci/build.py | Axford/AFRo | e57369c2d37e6001587cd37307d36c5f9f112c53 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Run the various build scripts
import sys
import os
from parse import parse_machines
from machines import machines
from assemblies import assemblies
from vitamins import vitamins
from printed import printed
from guides import guides
from publish import publish
def build(do_publish=0):
print("Build")
print("-----")
outfile = 'hardware.json'
oldfile = 'backup.json'
print("Backup current json...")
oldjso = None
if os.path.isfile(outfile) and not os.path.isfile(oldfile):
os.rename(outfile, oldfile)
errorlevel = 0
errorlevel += parse_machines()
if errorlevel == 0:
errorlevel += vitamins()
if errorlevel == 0:
errorlevel += printed()
if errorlevel == 0:
errorlevel += assemblies()
if errorlevel == 0:
errorlevel += machines()
if errorlevel == 0:
errorlevel += guides()
if errorlevel == 0 and do_publish > 0:
publish()
# if everything is ok then delete backup - no longer required
if errorlevel == 0:
os.remove(oldfile)
return errorlevel
if __name__ == '__main__':
if len(sys.argv) == 2:
sys.exit(build(sys.argv[1]))
else:
sys.exit(build(0)) | 22.368421 | 65 | 0.620392 |
import sys
import os
from parse import parse_machines
from machines import machines
from assemblies import assemblies
from vitamins import vitamins
from printed import printed
from guides import guides
from publish import publish
def build(do_publish=0):
print("Build")
print("-----")
outfile = 'hardware.json'
oldfile = 'backup.json'
print("Backup current json...")
oldjso = None
if os.path.isfile(outfile) and not os.path.isfile(oldfile):
os.rename(outfile, oldfile)
errorlevel = 0
errorlevel += parse_machines()
if errorlevel == 0:
errorlevel += vitamins()
if errorlevel == 0:
errorlevel += printed()
if errorlevel == 0:
errorlevel += assemblies()
if errorlevel == 0:
errorlevel += machines()
if errorlevel == 0:
errorlevel += guides()
if errorlevel == 0 and do_publish > 0:
publish()
if errorlevel == 0:
os.remove(oldfile)
return errorlevel
if __name__ == '__main__':
if len(sys.argv) == 2:
sys.exit(build(sys.argv[1]))
else:
sys.exit(build(0)) | true | true |
f7142459f0d44ac68274d37d43ee06f916581229 | 301 | py | Python | kai/model/__init__.py | Pylons/kai | 8c843bdb7508a25dea094fdd38bd5b5cc521d486 | [
"BSD-3-Clause"
] | 1 | 2021-04-27T19:13:28.000Z | 2021-04-27T19:13:28.000Z | kai/model/__init__.py | Pylons/kai | 8c843bdb7508a25dea094fdd38bd5b5cc521d486 | [
"BSD-3-Clause"
] | null | null | null | kai/model/__init__.py | Pylons/kai | 8c843bdb7508a25dea094fdd38bd5b5cc521d486 | [
"BSD-3-Clause"
] | null | null | null | """CouchDB Models"""
from kai.model.blog import Article
from kai.model.documentation import Documentation
from kai.model.generics import Comment, Rating
from kai.model.human import Human
from kai.model.paste import Paste
from kai.model.snippet import Snippet
from kai.model.traceback import Traceback
| 33.444444 | 49 | 0.827243 | from kai.model.blog import Article
from kai.model.documentation import Documentation
from kai.model.generics import Comment, Rating
from kai.model.human import Human
from kai.model.paste import Paste
from kai.model.snippet import Snippet
from kai.model.traceback import Traceback
| true | true |
f714245b3cc1fcb188d83f0e5b42aacd5f5699f1 | 1,095 | py | Python | beautiful_earth/app/routes.py | craklyn/space-apps-2019 | f0a7e28907b9471b207d75eb840b1bd27ef99547 | [
"MIT"
] | 6 | 2021-08-16T14:48:30.000Z | 2022-01-25T01:06:03.000Z | beautiful_earth/app/routes.py | craklyn/space-apps-2019 | f0a7e28907b9471b207d75eb840b1bd27ef99547 | [
"MIT"
] | 3 | 2021-06-08T20:29:10.000Z | 2022-03-12T00:02:34.000Z | beautiful_earth/app/routes.py | craklyn/space-apps-2019 | f0a7e28907b9471b207d75eb840b1bd27ef99547 | [
"MIT"
] | 1 | 2019-10-21T05:42:25.000Z | 2019-10-21T05:42:25.000Z | from flask import render_template
from flask import request
from flask import send_file
from flask import make_response
import cv2
import urllib
import numpy as np
# Add the pytorch folder to our script path
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '/Users/danielblackburn/space-apps-2019/beautiful_earth/pytorch-CycleGAN-and-pix2pix')
import inference
from inference import infer
from app import app
@app.route('/')
@app.route('/index')
def index():
user = {'username': 'Miguel'}
return render_template('index.html', title='Beautiful Earth')
@app.route('/image', methods = ['POST'])
def image():
json = request.json
imageUrl = json['imageUrl']
quadKey = json['quadKey']
temp = inference.infer(imageUrl)
filename = "/Users/danielblackburn/space-apps-2019/beautiful_earth/app/static/"+quadKey+".png"
cv2.imwrite(filename, temp)
response = make_response(send_file(filename, mimetype='image/jpeg', as_attachment=True, attachment_filename=quadKey))
response.headers['X-quadKey'] = quadKey
return response
| 28.076923 | 121 | 0.734247 | from flask import render_template
from flask import request
from flask import send_file
from flask import make_response
import cv2
import urllib
import numpy as np
import sys
sys.path.insert(1, '/Users/danielblackburn/space-apps-2019/beautiful_earth/pytorch-CycleGAN-and-pix2pix')
import inference
from inference import infer
from app import app
@app.route('/')
@app.route('/index')
def index():
user = {'username': 'Miguel'}
return render_template('index.html', title='Beautiful Earth')
@app.route('/image', methods = ['POST'])
def image():
json = request.json
imageUrl = json['imageUrl']
quadKey = json['quadKey']
temp = inference.infer(imageUrl)
filename = "/Users/danielblackburn/space-apps-2019/beautiful_earth/app/static/"+quadKey+".png"
cv2.imwrite(filename, temp)
response = make_response(send_file(filename, mimetype='image/jpeg', as_attachment=True, attachment_filename=quadKey))
response.headers['X-quadKey'] = quadKey
return response
| true | true |
f71424d21246fe49464870c05f4d12121a8e0237 | 79,831 | py | Python | core/domain/user_services.py | AbhinavGopal/oppiabackup | e5ae39b20623d4389885802d670b0142d82034ea | [
"Apache-2.0"
] | 1 | 2022-02-22T09:27:22.000Z | 2022-02-22T09:27:22.000Z | core/domain/user_services.py | IMADILKHAN/oppia | 454bf732dfd0087bcc0b8b7cd65d80ba386f4929 | [
"Apache-2.0"
] | null | null | null | core/domain/user_services.py | IMADILKHAN/oppia | 454bf732dfd0087bcc0b8b7cd65d80ba386f4929 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services for user data."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import hashlib
import imghdr
import logging
import re
from constants import constants
from core.domain import role_services
from core.domain import user_domain
from core.platform import models
import feconf
import python_utils
import utils
from google.appengine.api import urlfetch
current_user_services = models.Registry.import_current_user_services()
(user_models, audit_models) = models.Registry.import_models(
[models.NAMES.user, models.NAMES.audit])
# Size (in px) of the gravatar being retrieved.
GRAVATAR_SIZE_PX = 150
# Data url for images/avatar/user_blue_72px.png.
# Generated using utils.convert_png_to_data_url.
DEFAULT_IDENTICON_DATA_URL = (
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEwAAABMCAYAAADHl1ErAAAAAXNSR0IArs4c6QAADhtJREFUeAHtXHlwVdUZ/859jyxmIQESyCaglC0iAgkJIntrIpvKphSwY2ttxbFOp9R/cGGqdhykLaMVO2OtoyRSCEKNEpYKyBIVQ1iNkBhNMCtb8shiQpJ3b7/fTW7m5uUlecu9L4nTM5Pce8895zvf93vnnPud833fEdQLKXb5jsC6%2BuZERZbHKaSMYRbGKERxgpQQUkSIIigEbAmFavlfrUKiVhCVcFa%2BIJEvJOlCcNCAnNKMFQ0o58vEfPgmhS5Mn0ot8n2KIs8lIZJJUfy8almIJqbxhRDSIbJKe2s%2BXvWlV/RcrGwqYGGp20bI1LyaeVmjKMrodp4EycGBAy6MjgsrSxozqG7O5GgxcVREeEigNDAwwBpmsUiRKGu3y1caGltstQ3yjbOFV6sPnypXTuRXBReU2GLqGprHkUKSRlMIUcD3WyUakGbbt7JYyzf6agpgYfe9O8kui/U8nB7UhJIkUTljwrBTTz449mZKUlyCEBTnjTCKQiX7T5ScfGP3Rf9j5ysny7IyTKXHPwYP690WSXnZtvcXp71pw1ldQwELm59%2BlyzbX%2BbeNL%2Btscb4EYOyNz2ZWD99wtAFnGdxxoQBefbs85f3rHsjJyivuGo60wsATe51WZJkWW/LWnXGgDZUEoYAFr58x0B7beOLPHGv5XnFIpGoS0mKOfze%2Bpmj/f2smNR9lm42teQ/8vLRgv0nyuZwVwtm1Ows5BZLSMBz1RkrbnjLiNeAhaWmPWgn%2BxYeejwkRMu9idH7tm%2BYE8/z0EhvmfOmPs9/RQ9tOJx3IKc8lUixkqBKC1nW2vat3u0NXY8Bi1%2B%2Bw6%2BktnETD7%2BnwEB4iP/pL/5xf03U4IBZ3jBkdN2K641Hkn/7YWh17c1JoM3D9PW4kIB1eRkrmjxpyyPAeK4aLttbPuAhOIU5aHpm1cTMZ1ffuRT8eMKED%2BooL6Wd%2B2Bj%2BtnFUGeYyVzJYl3Kc9sld9t2W8Dw%2BWkTWuz2fdxQ9ACr9P3Jfy7%2BZuSw0HnuNtwb5Ysqaw4mPJb5k%2BYW%2BVZuv9xqsaRWZ60%2B7w4vbgEWnrJ1hp3kTO5ZYUPCAnK%2B3bYiitWDWHca7O2yrI6U3r5yR8U1W2MiC2%2BzkLS4ev%2BaY67y1a749VQBYLUIZT/AGhUTduS7f68Y39/AgozgGbxDBsgCmSBbT/Jr710CDMMQPYvHf2DC2Mj9p95efA8TCNKI9MNrEGSALJAJskFGV%2BTocUhigrfbWz5jYtH4VdrAMksBdYVnI8vYJ/8q83hhmW0WEy23WKx39/Qh6LaHQXXA1xBgYc5isBL4/scCFoC3QCbIBhkhK2TGi65St4CpeharDvgaYoJnIv15GHaFQRBkg4w8p02BzF0VRH6XgEGDV5VS1rOgOvTHCb47wfXvIBtkhE4JmSG7/r3%2B3ilg6toQyx1OUEr7i56lF8zde8gIWVEPSz1g4IyGU8CwkMbaEMudNg3eWd0fXR5khcyQXcXAiYSdAMMWDY/ltVhIY23IdXr8kjqh21%2BzRKvMogUYAAtHQToBhv0sbNFg16GvLaQdmTfjGTJDdmCgYuHQSIfe07pTSqewn3V9z6qrvb1F48Crzx6xNTR4QXoE9tN4c2%2ByfufWqudC3VbmAYzNPwZrkf6dL%2B4LSm5Q9vkrVH79B6qs%2BoH8B1goatAtNCIqmOZOiabw4G5VJMNYREdhDD7ae6J0USsmtEwj3t7DYLCwK83f8WbbzauZP7/kq53SxiY7vfmfC5R24Fv6prTrDVEWgqbfEUlPLY2nlKkxGv%2BmXbFzG7H4/eE8g/tZyO92zbDSPoe1WncUgT14X4G189NimvjobnrhX6e6BQuo8DCho2c
rafnzB2n%2BMwe4PL5H5iVgACx4wEltli%2B1sXbA%2BGkNcmCwUN%2BY%2BI%2B3WOjZt3Lpl68cpQoefu6m4%2Bcqae7TWfTfk%2BXuVnWrvA4LFRtUVockjKxKc8sJmMJsWWsiON/U9eJvNmXTtk%2B%2BdYt5Z4WZX0p/bjYtmBbn7LURefaw%2BVuvwoQnBliTYCxu7WFskQb1WROjcvliKlibM/IMAQv8siD0643H6etiGx7NSBbYUlXCbRipgKnme859Ysl4jwwDrnKaV2SjDe%2B0tu9qnZ7KsQWch/YxVpt6KunZexieUVPDSIJjCC86k3lwyikJ0di%2BMS09/3au2iuMbuDr4mpKN2CIO%2BMLVnpgA4yAlVRX1ziV4fODrwOv2k2bDM4UVvEkXeaMJ0PyXn3/nCF0HIkAE2ADjICVpChiLArBMcSxsJHPmdmXjCTXiVZRRS19VVTdKd%2BIDA0bYCW1%2BWcRvGiMIN4Vjb1flHb1yrD8rM9LDKOlJ6RhA6ww6au%2BD3A50hcy%2Bt5sRRP8FpSYo8zqsBnDPax13oJ/ltEgafSqam5SU7NdezTtWsHrTzOShg2wYtWP3SQ5wZnNjMZA80Z9s1mkO9CtMakdDRtgJcGnFK3C869D6wY%2BRISp7loGUnROKtKkdtqxYawkzQGXdwNUN0nnrHiXGxxoJf40e0fEhdpRg29xoZT7RTRsgJV%2B8e0%2BJTdqJIwd4kZpz4pOGWN%2BG5Lq2s38wQHXMzZdq2XiAlllgP2%2BaH6yOX4xGjbAinejlVq0CG9l10T3rNT99wwnf96KMyvNuHMoDR0UaAr5dmwYK1YrhAoYXLtNaa2N6DAW5vFF6qLClGZeeHSyKXRBVMMGWLFaoUZYEPzgTWuxjfC6lROI/RgMb2bZ7JGUaOIcqWEDrDDp50MCBA0YLokDQRgx0p%2BdTezH4PDG88dxI8LotaeneU7AhZo6bPK5hwkVMERYuFDX6yLT2JDx99/fTVY2anibYiOCaPuGuayydDB%2BeUu2U30NG2AlCaFcRAmEo3QqaVLGynm30a6X5sHz2uMWksZH0pHXF9CIYeb/zho2CAqTgoMDvoTXCmJ3EI7isQRuVpw9KYqytyykhxk8qASuJoD84mNTKGvjveSLFQQwUeOaGCNE0Flqvs5o8b/9gZ8xwyMmj404NComZJyrzHtbLjTIjxZNv1X9C/S30pXqRrLVdd4lh7EjOX4oPfHAOHrzD9Np9l1RZMHnygeJ45kOZXxaPJ6byr6WueotdfAjhI73rGdu2ZXnn5oY7QM2OjZxx8hw%2BvPjCepf2bUfqJz/Llc1qHpb1OBAiosMpoFB5i%2BtOnLV%2BoTgL9ypYYZ8bZ0tOd6QmuUNbCiFMoN9GPM0TCbeXYoZcgvhr48kOyLlVF6AESf1UwV7G88jBbC/ISqsjzDb62wAC9UmydhoAaz6b/tWcIgQul7ntI8woMNCxQZstQOGSFYeqQriDeGI0Ud47jU2gIEae8kmtlZsWllpB6zNO2UXZwcg3rDXOO0jDbdhEIDoXs1zB6y1A4YHhP3iiuBMOJXh3tfJzuZ/qBbfX65nR5UGqmto8TUL2OoqAgZoWMNEY6KTMhOa%2Bt4ehCDfmxjz8c4X5y3UChp5hVk/j63Vpwuu0zdlNVTIrkuFfC1hkOobO%2B//Qw8LD/an26JDaFRsKI2KCWU76kCaOi6CoHYYnZY9d/DjAzllC/lDmFWz75EFevqdFmGIkbbL9hREsiI40yg/11wGhxex9PlXV%2BjEhatUU99ZQdUzpr%2BH08n1mkb1L%2BfiVf0rGs5Lo2nxkXT3HUPZ0S7WawAhsxrFy6HPwKJDY/zQqYehAPey1%2BDgDxfsSxkPwZPYaTmU7S7BPWDXkWLafayYLlWaaidW2cASK5nBWzJzOD3AG5YebCgqw5dvP4PoXab1Oveu3znK5xQIOPW31DZchL/
6M6vv2sn%2B68scK3b1jDlo%2B6Hv6G878ij/e1M3cbtiQc3HML4vKZbWrbyTpowe3G1Z7SVH7e7cmHZmGXePSmtI4FhnQfVOAQMBNfhdse/CwvzsO/cf6ykapKlZpq0HCmlzxlc%2B6U2akK5c2XJNf3x4At3D29hdJUTrTnz0wxlwOrEIy5Kugum7BAyEtaGJwKVrH63mrSDn0besEdNTmz9XJ%2B6uGOoL%2BbAr/OXJJIoM77jryx%2Bh0iGL0mSENnc1FDX%2BO6gVWqZ2RfQ9I5oLQgj75fxO/q%2BvpJ9TnXTxlevr6cPjlyj5iUx2bb%2BsZ7UesqlgsayQWf/S8b7bHobC3QWYrv3rZ%2BwuXuhIs88/Y4v8vfWz4BvrdoBpj4BBejWE2W4/yupTGMJ%2BD21O/emf3j1t2bTNrYD8PgWkv7/FflvUwE8uFFelMAg2i8Uy05UTBlwCTAWtLUieJ8XA2MiQIxXX6xNYI%2B6XC3Wep%2Br5xz/Jsszij1qDVREprp4s4DJgGmjaMQzcUA5bgaNkRTbH3GxSf5SEVMoxRBUMlrnHMIB//ArounxbjgZZuWWtSzlokmyGkwWv4Bm8QwZ1GLpxZgUYcquHaRLgQ6A/SobJ4IiGpeyc7RE9ja55V/aKEOID5s/3R8loQjkeVsTzwmmeF2oYuFlamT5xFeII/4qh3LMmgR/oWT4/rEgPhONxWEKifUJW4mWikfpyvr5nBbNIkUQeD8BU7lm9fxyWHgDHA9fYQlzHg/0w/6qjuZzqdKwvb/J9PveiAl4Hz%2BE5q%2B8duKYXHjHSjkf6sXkqWyEZK4QFLIQ51iihWrr2CJKCeE6fzm2pax8Grm8e6acHDffth0YSLdF9CCoZvFye55okRU7gIetV1AkPuRJZSCfZUdefezJMYf3v0MhOwHVzLKlQxAWSRJlQlDr%2BzrPcUjjbGwbyBB2mCKH62/K7KwywjWM8b5CQq%2BH9x%2B%2BCSVZiFKH8eI4ldQQOz4jJ/P/Bt86QcSFPPVqZA50Qu4NwFK7i3tHK7HEEJ5reOFr5fwkK97jkk8ywAAAAAElFTkSuQmCC') # pylint: disable=line-too-long
class UserSettings(python_utils.OBJECT):
    """Value object representing a user's settings.
    Attributes:
        user_id: str. The unique ID of the user.
        gae_id: str. The ID of the user retrieved from GAE.
        email: str. The user email.
        role: str. Role of the user. This is used in conjunction with
            PARENT_ROLES to determine which actions the user can perform.
        username: str or None. Identifiable username to display in the UI.
        last_agreed_to_terms: datetime.datetime or None. When the user last
            agreed to the terms of the site.
        last_started_state_editor_tutorial: datetime.datetime or None. When
            the user last started the state editor tutorial.
        last_started_state_translation_tutorial: datetime.datetime or None. When
            the user last started the state translation tutorial.
        last_logged_in: datetime.datetime or None. When the user last logged in.
        last_created_an_exploration: datetime.datetime or None. When the user
            last created an exploration.
        last_edited_an_exploration: datetime.datetime or None. When the user
            last edited an exploration.
        profile_picture_data_url: str or None. User uploaded profile picture as
            a dataURI string.
        default_dashboard: str or None. The default dashboard of the user.
        user_bio: str. User-specified biography.
        subject_interests: list(str) or None. Subject interests specified by
            the user.
        first_contribution_msec: float or None. The time in milliseconds when
            the user first contributed to Oppia.
        preferred_language_codes: list(str) or None. Exploration language
            preferences specified by the user.
        preferred_site_language_code: str or None. System language preference.
        preferred_audio_language_code: str or None. Audio language preference.
    """
    def __init__(
            self, user_id, gae_id, email, role, username=None,
            last_agreed_to_terms=None, last_started_state_editor_tutorial=None,
            last_started_state_translation_tutorial=None, last_logged_in=None,
            last_created_an_exploration=None, last_edited_an_exploration=None,
            profile_picture_data_url=None, default_dashboard=None,
            creator_dashboard_display_pref=(
                constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['CARD']),
            user_bio='', subject_interests=None, first_contribution_msec=None,
            preferred_language_codes=None, preferred_site_language_code=None,
            preferred_audio_language_code=None, deleted=False):
        """Constructs a UserSettings domain object.
        Args:
            user_id: str. The unique ID of the user.
            gae_id: str. The ID of the user retrieved from GAE.
            email: str. The user email.
            role: str. Role of the user. This is used in conjunction with
                PARENT_ROLES to determine which actions the user can perform.
            username: str or None. Identifiable username to display in the UI.
            last_agreed_to_terms: datetime.datetime or None. When the user
                last agreed to the terms of the site.
            last_started_state_editor_tutorial: datetime.datetime or None. When
                the user last started the state editor tutorial.
            last_started_state_translation_tutorial: datetime.datetime or None.
                When the user last started the state translation tutorial.
            last_logged_in: datetime.datetime or None. When the user last
                logged in.
            last_created_an_exploration: datetime.datetime or None. When the
                user last created an exploration.
            last_edited_an_exploration: datetime.datetime or None. When the
                user last edited an exploration.
            profile_picture_data_url: str or None. User uploaded profile
                picture as a dataURI string.
            default_dashboard: str|None. The default dashboard of the user.
            creator_dashboard_display_pref: str. The creator dashboard of the
                user.
            user_bio: str. User-specified biography.
            subject_interests: list(str) or None. Subject interests specified by
                the user.
            first_contribution_msec: float or None. The time in milliseconds
                when the user first contributed to Oppia.
            preferred_language_codes: list(str) or None. Exploration language
                preferences specified by the user.
            preferred_site_language_code: str or None. System language
                preference.
            preferred_audio_language_code: str or None. Default language used
                for audio translations preference.
            deleted: bool. Whether the user has requested removal of their
                account.
        """
        self.user_id = user_id
        self.gae_id = gae_id
        self.email = email
        self.role = role
        self.username = username
        self.last_agreed_to_terms = last_agreed_to_terms
        self.last_started_state_editor_tutorial = (
            last_started_state_editor_tutorial)
        self.last_started_state_translation_tutorial = (
            last_started_state_translation_tutorial)
        self.last_logged_in = last_logged_in
        self.last_edited_an_exploration = last_edited_an_exploration
        self.last_created_an_exploration = last_created_an_exploration
        self.profile_picture_data_url = profile_picture_data_url
        self.default_dashboard = default_dashboard
        self.creator_dashboard_display_pref = creator_dashboard_display_pref
        self.user_bio = user_bio
        # List-valued preferences are normalized to fresh empty lists here so
        # every instance owns its own list object (avoids the shared mutable
        # default pitfall).
        self.subject_interests = (
            subject_interests if subject_interests else [])
        self.first_contribution_msec = first_contribution_msec
        self.preferred_language_codes = (
            preferred_language_codes if preferred_language_codes else [])
        self.preferred_site_language_code = preferred_site_language_code
        self.preferred_audio_language_code = preferred_audio_language_code
        self.deleted = deleted
    def validate(self):
        """Checks that user_id and email fields of this UserSettings domain
        object are valid.
        Raises:
            ValidationError: user_id is not str.
            ValidationError: gae_id is not str.
            ValidationError: email is not str.
            ValidationError: email is invalid.
            ValidationError: role is not str.
            ValidationError: Given role does not exist.
        """
        if not isinstance(self.user_id, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected user_id to be a string, received %s' % self.user_id)
        if not self.user_id:
            raise utils.ValidationError('No user id specified.')
        # gae_id may legitimately be None (e.g. for system users); only its
        # type is validated when present.
        if (self.gae_id is not None and
                not isinstance(self.gae_id, python_utils.BASESTRING)):
            raise utils.ValidationError(
                'Expected gae_id to be a string, received %s' %
                self.gae_id
            )
        if not isinstance(self.email, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected email to be a string, received %s' % self.email)
        if not self.email:
            raise utils.ValidationError('No user email specified.')
        # Lightweight sanity check only: the email must contain an '@' that
        # is neither the first nor the last character.
        if ('@' not in self.email or self.email.startswith('@')
                or self.email.endswith('@')):
            raise utils.ValidationError(
                'Invalid email address: %s' % self.email)
        if not isinstance(self.role, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected role to be a string, received %s' % self.role)
        if self.role not in role_services.PARENT_ROLES:
            raise utils.ValidationError('Role %s does not exist.' % self.role)
        if not isinstance(
                self.creator_dashboard_display_pref, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected dashboard display preference to be a string, '
                'received %s' % self.creator_dashboard_display_pref)
        if (self.creator_dashboard_display_pref not in
                list(constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS.values(
                    ))):
            raise utils.ValidationError(
                '%s is not a valid value for the dashboard display '
                'preferences.' % (self.creator_dashboard_display_pref))
    @property
    def truncated_email(self):
        """Returns truncated email by replacing last two characters before @
        with period.
        Returns:
            str. The truncated email address of this UserSettings
            domain object.
        """
        # Mask the tail of the local part with '..': very short local parts
        # (<= 1 char) are hidden entirely, short ones (<= 3 chars) keep only
        # their first character, longer ones lose their last three characters.
        first_part = self.email[: self.email.find('@')]
        last_part = self.email[self.email.find('@'):]
        if len(first_part) <= 1:
            first_part = '..'
        elif len(first_part) <= 3:
            first_part = '%s..' % first_part[0]
        else:
            first_part = first_part[:-3] + '..'
        return '%s%s' % (first_part, last_part)
    @property
    def normalized_username(self):
        """Returns username in lowercase or None if it does not exist.
        Returns:
            str or None. If this object has a 'username' property, returns
            the normalized version of the username. Otherwise, returns None.
        """
        return self.normalize_username(self.username)
    @classmethod
    def normalize_username(cls, username):
        """Returns the normalized version of the given username,
        or None if the passed-in 'username' is None.
        Args:
            username: str. Identifiable username to display in the UI.
        Returns:
            str or None. The normalized version of the given username,
            or None if the passed-in username is None.
        """
        return username.lower() if username else None
    @classmethod
    def require_valid_username(cls, username):
        """Checks if the given username is valid or not.
        Args:
            username: str. The username to validate.
        Raises:
            ValidationError: An empty username is supplied.
            ValidationError: The given username exceeds the maximum allowed
                number of characters.
            ValidationError: The given username contains non-alphanumeric
                characters.
            ValidationError: The given username contains reserved substrings.
        """
        if not username:
            raise utils.ValidationError('Empty username supplied.')
        elif len(username) > constants.MAX_USERNAME_LENGTH:
            raise utils.ValidationError(
                'A username can have at most %s characters.'
                % constants.MAX_USERNAME_LENGTH)
        elif not re.match(feconf.ALPHANUMERIC_REGEX, username):
            raise utils.ValidationError(
                'Usernames can only have alphanumeric characters.')
        else:
            # Disallow usernames that contain the system usernames or the
            # strings "admin" or "oppia".
            reserved_usernames = set(feconf.SYSTEM_USERS.values()) | set([
                'admin', 'oppia'])
            for reserved_username in reserved_usernames:
                if reserved_username in username.lower().strip():
                    raise utils.ValidationError(
                        'This username is not available.')
def is_user_id_correct(user_id):
    """Verify that the user ID is in a correct format.
    Args:
        user_id: str. The user ID to be checked.
    Returns:
        bool. True when the ID is in a correct format, False otherwise.
    """
    # A well-formed ID is lowercase, carries the 'uid_' prefix, and has
    # exactly the generated length.
    is_lowercase = user_id.islower()
    has_prefix = user_id.startswith('uid_')
    has_correct_length = len(user_id) == user_models.USER_ID_LENGTH
    return is_lowercase and has_prefix and has_correct_length
def is_username_taken(username):
    """Checks whether the given username has already been taken.
    Args:
        username: str. Identifiable username to display in the UI.
    Returns:
        bool. Whether the given username is taken.
    """
    normalized_username = UserSettings.normalize_username(username)
    return user_models.UserSettingsModel.is_normalized_username_taken(
        normalized_username)
def get_email_from_user_id(user_id):
    """Gets the email from a given user_id.
    Args:
        user_id: str. The unique ID of the user.
    Returns:
        str. user_email corresponding to the given user_id.
    Raises:
        Exception: The user is not found.
    """
    return get_user_settings(user_id).email
def get_email_from_username(username):
    """Gets the email for a given username.
    Args:
        username: str. Identifiable username to display in the UI.
    Returns:
        str or None. If the user with given username does not exist,
        return None. Otherwise return the corresponding user_email.
    """
    user_model = user_models.UserSettingsModel.get_by_normalized_username(
        UserSettings.normalize_username(username))
    return user_model.email if user_model is not None else None
def get_user_id_from_username(username):
    """Gets the user_id for a given username.
    Args:
        username: str. Identifiable username to display in the UI.
    Returns:
        str or None. If the user with given username does not exist, return
        None. Otherwise return the user_id corresponding to given username.
    """
    user_model = user_models.UserSettingsModel.get_by_normalized_username(
        UserSettings.normalize_username(username))
    return user_model.id if user_model is not None else None
def get_user_settings_from_username(username):
    """Gets the user settings for a given username.
    Args:
        username: str. Identifiable username to display in the UI.
    Returns:
        UserSettingsModel or None. The UserSettingsModel instance corresponding
        to the given username, or None if no such model was found.
    """
    user_model = user_models.UserSettingsModel.get_by_normalized_username(
        UserSettings.normalize_username(username))
    if user_model is None:
        return None
    return get_user_settings(user_model.id)
def get_users_settings(user_ids):
    """Gets domain objects representing the settings for the given user_ids.
    Args:
        user_ids: list(str). The list of user_ids to get UserSettings
            domain objects for.
    Returns:
        list(UserSettings|None). The UserSettings domain objects corresponding
        to the given user ids. If the given user_id does not exist, the
        corresponding entry in the returned list is None.
    """
    settings_models = user_models.UserSettingsModel.get_multi(user_ids)
    user_settings_list = []
    for user_id, model in zip(user_ids, settings_models):
        if user_id == feconf.SYSTEM_COMMITTER_ID:
            # The system committer has no stored model; synthesize its
            # settings on the fly.
            user_settings_list.append(UserSettings(
                user_id=feconf.SYSTEM_COMMITTER_ID,
                gae_id=feconf.SYSTEM_COMMITTER_ID,
                email=feconf.SYSTEM_EMAIL_ADDRESS,
                role=feconf.ROLE_ID_ADMIN,
                username='admin',
                last_agreed_to_terms=datetime.datetime.utcnow()
            ))
        else:
            user_settings_list.append(_transform_user_settings(model))
    return user_settings_list
def generate_initial_profile_picture(user_id):
    """Generates a profile picture for a new user and
    updates the user's settings in the datastore.
    Args:
        user_id: str. The unique ID of the user.
    """
    gravatar = fetch_gravatar(get_email_from_user_id(user_id))
    update_profile_picture_data_url(user_id, gravatar)
def get_gravatar_url(email):
    """Returns the gravatar url for the specified email.
    Args:
        email: str. The user email.
    Returns:
        str. The gravatar url for the specified email.
    """
    # hashlib.md5 requires a bytes object under Python 3; encoding the email
    # explicitly keeps this working in both Python 2 and Python 3 (the result
    # is unchanged for ASCII emails under Python 2).
    email_hash = hashlib.md5(email.encode('utf-8')).hexdigest()
    return (
        'https://www.gravatar.com/avatar/%s?d=identicon&s=%s' %
        (email_hash, GRAVATAR_SIZE_PX))
def fetch_gravatar(email):
    """Returns the gravatar corresponding to the user's email, or an
    identicon generated from the email if the gravatar doesn't exist.
    Args:
        email: str. The user email.
    Returns:
        str. The gravatar url corresponding to the given user email. If the
        call to the gravatar service fails, this returns
        DEFAULT_IDENTICON_DATA_URL and logs an error.
    """
    gravatar_url = get_gravatar_url(email)
    try:
        response = urlfetch.fetch(
            gravatar_url,
            headers={'Content-Type': 'image/png'},
            follow_redirects=False)
    except (urlfetch.InvalidURLError, urlfetch.DownloadError):
        logging.error('Failed to fetch Gravatar from %s' % gravatar_url)
        return DEFAULT_IDENTICON_DATA_URL
    if response.status_code != 200:
        logging.error(
            '[Status %s] Failed to fetch Gravatar from %s' %
            (response.status_code, gravatar_url))
        return DEFAULT_IDENTICON_DATA_URL
    # Only accept genuine PNG payloads; anything else falls back to the
    # default identicon.
    if imghdr.what(None, h=response.content) == 'png':
        return utils.convert_png_binary_to_data_url(response.content)
    return DEFAULT_IDENTICON_DATA_URL
def get_user_settings(user_id, strict=False):
    """Return the user settings for a single user.
    Args:
        user_id: str. The unique ID of the user.
        strict: bool. Whether to fail noisily if no user with the given
            id exists in the datastore. Defaults to False.
    Returns:
        UserSettings or None. If the given user_id does not exist and strict
        is False, returns None. Otherwise, returns the corresponding
        UserSettings domain object.
    Raises:
        Exception: strict is True and given user_id does not exist.
    """
    user_settings = get_users_settings([user_id])[0]
    if user_settings is None and strict:
        logging.error('Could not find user with id %s' % user_id)
        raise Exception('User not found.')
    return user_settings
def get_user_settings_by_gae_id(gae_id, strict=False):
    """Return the user settings for a single user.
    Args:
        gae_id: str. The GAE user ID of the user.
        strict: bool. Whether to fail noisily if no user with the given
            id exists in the datastore. Defaults to False.
    Returns:
        UserSettings or None. If the given gae_id does not exist and strict
        is False, returns None. Otherwise, returns the corresponding
        UserSettings domain object.
    Raises:
        Exception: strict is True and given gae_id does not exist.
    """
    user_settings_model = user_models.UserSettingsModel.get_by_gae_id(gae_id)
    user_settings = _transform_user_settings(user_settings_model)
    if user_settings is None and strict:
        logging.error('Could not find user with id %s' % gae_id)
        raise Exception('User not found.')
    return user_settings
def get_user_role_from_id(user_id):
    """Returns role of the user with given user_id.
    Args:
        user_id: str. The unique ID of the user.
    Returns:
        str. Role of the user with given id.
    """
    # Unknown users are treated as guests.
    user_settings = get_user_settings(user_id, strict=False)
    return (
        feconf.ROLE_ID_GUEST if user_settings is None else user_settings.role)
def get_user_community_rights(user_id):
    """Returns the UserCommunityRights domain object for the given user_id.
    Args:
        user_id: str. The unique ID of the user.
    Returns:
        UserCommunityRights. The UserCommunityRights domain object for the
        corresponding user.
    """
    rights_model = user_models.UserCommunityRightsModel.get_by_id(user_id)
    if rights_model is None:
        # No stored rights: the user has no review permissions at all.
        return user_domain.UserCommunityRights(user_id, [], [], False)
    return user_domain.UserCommunityRights(
        user_id,
        rights_model.can_review_translation_for_language_codes,
        rights_model.can_review_voiceover_for_language_codes,
        rights_model.can_review_questions)
def get_all_community_reviewers():
    """Returns a list of UserCommunityRights objects corresponding to each
    UserCommunityRightsModel.
    Returns:
        list(UserCommunityRights). A list of UserCommunityRights objects.
    """
    reviewers = []
    for rights_model in user_models.UserCommunityRightsModel.get_all():
        reviewers.append(user_domain.UserCommunityRights(
            rights_model.id,
            rights_model.can_review_translation_for_language_codes,
            rights_model.can_review_voiceover_for_language_codes,
            rights_model.can_review_questions))
    return reviewers
def _save_user_community_rights(user_community_rights):
    """Validates and commits the UserCommunityRights object to the datastore.
    Args:
        user_community_rights: UserCommunityRights. The UserCommunityRights
            object of the user.
    """
    # TODO(#8794): Add limitation on number of reviewers allowed in any
    # category.
    user_community_rights.validate()
    rights_model = user_models.UserCommunityRightsModel(
        id=user_community_rights.id,
        can_review_translation_for_language_codes=(
            user_community_rights.can_review_translation_for_language_codes),
        can_review_voiceover_for_language_codes=(
            user_community_rights.can_review_voiceover_for_language_codes),
        can_review_questions=user_community_rights.can_review_questions)
    rights_model.put()
def _update_user_community_rights(user_community_rights):
    """Updates the users rights model if the updated object has review rights
    in at least one item else delete the existing model.
    Args:
        user_community_rights: UserCommunityRights. The updated
            UserCommunityRights object of the user.
    """
    # A user with no remaining review rights should have no model at all.
    if not user_community_rights.can_review_at_least_one_item():
        remove_community_reviewer(user_community_rights.id)
    else:
        _save_user_community_rights(user_community_rights)
def get_usernames_by_role(role):
    """Get usernames of all the users with given role ID.
    Args:
        role: str. The role ID of users requested.
    Returns:
        list(str). List of usernames of users with given role ID.
    """
    return [
        settings_model.username
        for settings_model in user_models.UserSettingsModel.get_by_role(role)]
def get_user_ids_by_role(role):
    """Get user ids of all the users with given role ID.
    Args:
        role: str. The role ID of users requested.
    Returns:
        list(str). List of user ids of users with given role ID.
    """
    return [
        settings_model.id
        for settings_model in user_models.UserSettingsModel.get_by_role(role)]
class UserActionsInfo(python_utils.OBJECT):
    """Read-only snapshot of a user's identity, role and allowed actions.
    The role and action list are resolved once, at construction time, from
    the given user id.
    Attributes:
        user_id: str. The unique ID of the user.
        role: str. The role ID of the user.
        actions: list(str). A list of actions accessible to the role.
    """
    def __init__(self, user_id=None):
        """Resolves and stores the role and actions for the given user id.
        Args:
            user_id: str or None. The unique ID of the user; None resolves
                to the guest role.
        """
        self._user_id = user_id
        self._role = get_user_role_from_id(user_id)
        self._actions = role_services.get_all_actions(self._role)
    @property
    def user_id(self):
        """The unique ID of the user.
        Returns:
            str. The unique ID of the user.
        """
        return self._user_id
    @property
    def role(self):
        """The role ID of the user.
        Returns:
            str. The role ID of the user.
        """
        return self._role
    @property
    def actions(self):
        """The actions accessible to this user's role.
        Returns:
            list(str). List of actions accessible to a user ID.
        """
        return self._actions
def get_system_user():
    """Returns user object with system committer user id.
    Returns:
        UserActionsInfo. User object with system committer user id.
    """
    return UserActionsInfo(feconf.SYSTEM_COMMITTER_ID)
def _save_user_settings(user_settings):
    """Validates and commits a user settings object to the datastore.
    Args:
        user_settings: UserSettings. The domain object to persist.
    """
    user_settings.validate()
    settings_dict = {
        'gae_id': user_settings.gae_id,
        'email': user_settings.email,
        'role': user_settings.role,
        'username': user_settings.username,
        'normalized_username': user_settings.normalized_username,
        'last_agreed_to_terms': user_settings.last_agreed_to_terms,
        'last_started_state_editor_tutorial': (
            user_settings.last_started_state_editor_tutorial),
        'last_started_state_translation_tutorial': (
            user_settings.last_started_state_translation_tutorial),
        'last_logged_in': user_settings.last_logged_in,
        'last_edited_an_exploration': user_settings.last_edited_an_exploration,
        'last_created_an_exploration': (
            user_settings.last_created_an_exploration),
        'profile_picture_data_url': user_settings.profile_picture_data_url,
        'default_dashboard': user_settings.default_dashboard,
        'creator_dashboard_display_pref': (
            user_settings.creator_dashboard_display_pref),
        'user_bio': user_settings.user_bio,
        'subject_interests': user_settings.subject_interests,
        'first_contribution_msec': user_settings.first_contribution_msec,
        'preferred_language_codes': user_settings.preferred_language_codes,
        'preferred_site_language_code': (
            user_settings.preferred_site_language_code),
        'preferred_audio_language_code': (
            user_settings.preferred_audio_language_code),
        'deleted': user_settings.deleted
    }
    # Update the existing model for this user id if there is one; otherwise
    # create a fresh model keyed by the user id.
    settings_model = user_models.UserSettingsModel.get_by_id(
        user_settings.user_id)
    if settings_model is None:
        settings_dict['id'] = user_settings.user_id
        settings_model = user_models.UserSettingsModel(**settings_dict)
    else:
        settings_model.populate(**settings_dict)
    settings_model.put()
def _transform_user_settings(user_settings_model):
    """Transform user settings storage model to domain object.
    Args:
        user_settings_model: UserSettingsModel or None. The storage model to
            convert.
    Returns:
        UserSettings or None. Domain object for user settings, or None when
        no model is given.
    """
    if not user_settings_model:
        return None
    return UserSettings(
        user_id=user_settings_model.id,
        gae_id=user_settings_model.gae_id,
        email=user_settings_model.email,
        role=user_settings_model.role,
        username=user_settings_model.username,
        last_agreed_to_terms=user_settings_model.last_agreed_to_terms,
        last_started_state_editor_tutorial=(
            user_settings_model.last_started_state_editor_tutorial),
        last_started_state_translation_tutorial=(
            user_settings_model.last_started_state_translation_tutorial),
        last_logged_in=user_settings_model.last_logged_in,
        last_edited_an_exploration=(
            user_settings_model.last_edited_an_exploration),
        last_created_an_exploration=(
            user_settings_model.last_created_an_exploration),
        profile_picture_data_url=(
            user_settings_model.profile_picture_data_url),
        default_dashboard=user_settings_model.default_dashboard,
        creator_dashboard_display_pref=(
            user_settings_model.creator_dashboard_display_pref),
        user_bio=user_settings_model.user_bio,
        subject_interests=user_settings_model.subject_interests,
        first_contribution_msec=(
            user_settings_model.first_contribution_msec),
        preferred_language_codes=(
            user_settings_model.preferred_language_codes),
        preferred_site_language_code=(
            user_settings_model.preferred_site_language_code),
        preferred_audio_language_code=(
            user_settings_model.preferred_audio_language_code),
        deleted=user_settings_model.deleted
    )
def is_user_registered(user_id):
    """Checks if a user is registered with the given user_id.
    Args:
        user_id: str. The unique ID of the user.
    Returns:
        bool. Whether a user with the given user_id is registered.
    """
    return user_id is not None and bool(
        user_models.UserSettingsModel.get(user_id, strict=False))
def has_ever_registered(user_id):
    """Checks if a user has ever been registered with given user_id.
    Args:
        user_id: str. The unique ID of the user.
    Returns:
        bool. Whether a user with the given user_id has ever been registered.
    """
    # A user counts as registered once they have both picked a username and
    # agreed to the site terms.
    user_settings = get_user_settings(user_id, strict=True)
    return bool(user_settings.username and user_settings.last_agreed_to_terms)
def has_fully_registered(user_id):
    """Checks if a user has fully registered.
    Args:
        user_id: str. The unique ID of the user.
    Returns:
        bool. Whether a user with the given user_id has fully registered.
    """
    if user_id is None:
        return False
    user_settings = get_user_settings(user_id, strict=True)
    # Cast to bool so callers always get the documented type: the raw
    # and-expression would otherwise leak None or '' (this also matches
    # has_ever_registered). Truthiness is unchanged.
    return bool(
        user_settings.username and user_settings.last_agreed_to_terms and
        user_settings.last_agreed_to_terms >=
        feconf.REGISTRATION_PAGE_LAST_UPDATED_UTC)
def create_new_user(gae_id, email):
    """Creates a new user.
    Args:
        gae_id: str. The unique GAE user ID of the user.
        email: str. The user email.
    Returns:
        UserSettings. The newly-created user settings domain object.
    Raises:
        Exception: If a user with the given gae_id already exists.
    """
    # Look the user up by GAE id. The previous check used
    # get_user_settings(gae_id), which looks up by *user id*; since user ids
    # are generated independently of the GAE id (via get_new_id below), that
    # check could miss an existing user and create a duplicate.
    user_settings = get_user_settings_by_gae_id(gae_id, strict=False)
    if user_settings is not None:
        raise Exception('User %s already exists.' % gae_id)
    user_id = user_models.UserSettingsModel.get_new_id('')
    user_settings = UserSettings(
        user_id, gae_id, email, feconf.ROLE_ID_EXPLORATION_EDITOR,
        preferred_language_codes=[constants.DEFAULT_LANGUAGE_CODE])
    _save_user_settings(user_settings)
    create_user_contributions(user_id, [], [])
    return user_settings
def get_username(user_id):
    """Gets username corresponding to the given user_id.
    Args:
        user_id: str. The unique ID of the user.
    Returns:
        str. Username corresponding to the given user_id.
    """
    # System users have fixed usernames that are not stored in the datastore.
    try:
        return feconf.SYSTEM_USERS[user_id]
    except KeyError:
        return get_user_settings(user_id, strict=True).username
def get_usernames(user_ids):
    """Gets usernames corresponding to the given user_ids.
    Args:
        user_ids: list(str). The list of user_ids to get usernames for.
    Returns:
        list(str|None). Containing usernames based on given user_ids.
        If a user_id does not exist, the corresponding entry in the
        returned list is None.
    """
    usernames = [None] * len(user_ids)
    # Resolve system users directly; everything else goes through a single
    # batched settings lookup.
    regular_indices = []
    regular_user_ids = []
    for index, user_id in enumerate(user_ids):
        if user_id in feconf.SYSTEM_USERS:
            usernames[index] = feconf.SYSTEM_USERS[user_id]
        else:
            regular_indices.append(index)
            regular_user_ids.append(user_id)
    regular_settings = get_users_settings(regular_user_ids)
    for index, user_settings in zip(regular_indices, regular_settings):
        if user_settings:
            usernames[index] = user_settings.username
    return usernames
def set_username(user_id, new_username):
    """Updates the username of the user with the given user_id.
    Args:
        user_id: str. The unique ID of the user.
        new_username: str. The new username to set.
    Raises:
        ValidationError: The new_username supplied is already taken.
    """
    user_settings = get_user_settings(user_id, strict=True)
    # Reject syntactically invalid usernames before checking availability.
    UserSettings.require_valid_username(new_username)
    if is_username_taken(new_username):
        raise utils.ValidationError(
            'Sorry, the username \"%s\" is already taken! Please pick '
            'a different one.' % new_username)
    user_settings.username = new_username
    _save_user_settings(user_settings)
def record_agreement_to_terms(user_id):
    """Records that the user with given user_id has agreed to the license
    terms, timestamped with the current UTC time.
    Args:
        user_id: str. The unique ID of the user.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.last_agreed_to_terms = datetime.datetime.utcnow()
    _save_user_settings(settings)
def update_profile_picture_data_url(user_id, profile_picture_data_url):
    """Updates profile_picture_data_url of user with given user_id.
    Args:
        user_id: str. The unique ID of the user.
        profile_picture_data_url: str. New profile picture url to be set.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.profile_picture_data_url = profile_picture_data_url
    _save_user_settings(settings)
def update_user_bio(user_id, user_bio):
    """Updates user_bio of user with given user_id.
    Args:
        user_id: str. The unique ID of the user.
        user_bio: str. New user biography to be set.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.user_bio = user_bio
    _save_user_settings(settings)
def update_user_default_dashboard(user_id, default_dashboard):
    """Updates the default dashboard of user with given user id.
    Args:
        user_id: str. The unique ID of the user.
        default_dashboard: str. The dashboard the user wants.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.default_dashboard = default_dashboard
    _save_user_settings(settings)
def update_user_creator_dashboard_display(
        user_id, creator_dashboard_display_pref):
    """Updates the creator dashboard preference of user with given user id.
    Args:
        user_id: str. The unique ID of the user.
        creator_dashboard_display_pref: str. The creator dashboard preference
            the user wants.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.creator_dashboard_display_pref = creator_dashboard_display_pref
    _save_user_settings(settings)
def update_subject_interests(user_id, subject_interests):
    """Updates subject_interests of user with given user_id.
    Args:
        user_id: str. The unique ID of the user.
        subject_interests: list(str). New subject interests to be set.
    Raises:
        ValidationError: The interests are not a list of distinct, non-empty,
            lowercase alphabetic strings.
    """
    if not isinstance(subject_interests, list):
        raise utils.ValidationError('Expected subject_interests to be a list.')
    # Each interest must be a non-empty string of lowercase letters/spaces.
    for interest in subject_interests:
        if not isinstance(interest, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected each subject interest to be a string.')
        if not interest:
            raise utils.ValidationError(
                'Expected each subject interest to be non-empty.')
        if not re.match(constants.TAG_REGEX, interest):
            raise utils.ValidationError(
                'Expected each subject interest to consist only of '
                'lowercase alphabetic characters and spaces.')
    if len(set(subject_interests)) != len(subject_interests):
        raise utils.ValidationError(
            'Expected each subject interest to be distinct.')
    settings = get_user_settings(user_id, strict=True)
    settings.subject_interests = subject_interests
    _save_user_settings(settings)
def _update_first_contribution_msec(user_id, first_contribution_msec):
    """Updates first_contribution_msec of user with given user_id.
    Args:
        user_id: str. The unique ID of the user.
        first_contribution_msec: float. New time to set in milliseconds
            representing user's first contribution to Oppia.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.first_contribution_msec = first_contribution_msec
    _save_user_settings(settings)
def update_first_contribution_msec_if_not_set(user_id, first_contribution_msec):
    """Updates first_contribution_msec of user with given user_id
    if it is set to None.
    Args:
        user_id: str. The unique ID of the user.
        first_contribution_msec: float. New time to set in milliseconds
            representing user's first contribution to Oppia.
    """
    # Only the very first contribution time is recorded; later calls with a
    # value already present are ignored.
    settings = get_user_settings(user_id, strict=True)
    if settings.first_contribution_msec is None:
        _update_first_contribution_msec(user_id, first_contribution_msec)
def update_preferred_language_codes(user_id, preferred_language_codes):
    """Updates preferred_language_codes of user with given user_id.
    Args:
        user_id: str. The unique ID of the user.
        preferred_language_codes: list(str). New exploration language
            preferences to set.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.preferred_language_codes = preferred_language_codes
    _save_user_settings(settings)
def update_preferred_site_language_code(user_id, preferred_site_language_code):
    """Updates preferred_site_language_code of user with given user_id.
    Args:
        user_id: str. The unique ID of the user.
        preferred_site_language_code: str. New system language preference
            to set.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.preferred_site_language_code = preferred_site_language_code
    _save_user_settings(settings)
def update_preferred_audio_language_code(
        user_id, preferred_audio_language_code):
    """Updates preferred_audio_language_code of user with given user_id.
    Args:
        user_id: str. The unique ID of the user.
        preferred_audio_language_code: str. New audio language preference
            to set.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.preferred_audio_language_code = preferred_audio_language_code
    _save_user_settings(settings)
def update_user_role(user_id, role):
    """Assigns the given role to the user with the given user_id.

    Args:
        user_id: str. The unique ID of the user whose role is to be updated.
        role: str. The role to assign. Must be one of the known roles.

    Raises:
        Exception: The given role does not exist.
    """
    # Reject unknown roles before touching the datastore.
    if role not in role_services.PARENT_ROLES:
        raise Exception('Role %s does not exist.' % role)
    settings = get_user_settings(user_id, strict=True)
    settings.role = role
    _save_user_settings(settings)
def mark_user_for_deletion(
        user_id, exploration_ids, collection_ids):
    """Flags the given user as deleted and records a pending deletion
    request so that the associated data can be hard-deleted later.

    Args:
        user_id: str. The unique ID of the user who should be deleted.
        exploration_ids: list(str). IDs of explorations that were soft
            deleted and should be hard deleted later.
        collection_ids: list(str). IDs of collections that were soft
            deleted and should be hard deleted later.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.deleted = True
    _save_user_settings(settings)

    pending_deletion_request = user_models.PendingDeletionRequestModel(
        id=user_id,
        email=settings.email,
        exploration_ids=exploration_ids,
        collection_ids=collection_ids)
    pending_deletion_request.put()
def get_human_readable_user_ids(user_ids):
    """Maps the given user ids to usernames, falling back to truncated
    email addresses for users who have not yet chosen a username.

    Args:
        user_ids: list(str). The list of user_ids to resolve.

    Returns:
        list(str). For each user_id, the corresponding username, or the
        user's truncated email address if no username has been set yet.
        The system committer is reported as 'admin'.

    Raises:
        Exception: At least one of the user_ids does not correspond to a
            valid UserSettingsModel.
    """
    usernames = []
    for index, settings in enumerate(get_users_settings(user_ids)):
        if settings is None:
            logging.error('User id %s not known in list of user_ids %s' % (
                user_ids[index], user_ids))
            raise Exception('User not found.')
        if settings.user_id == feconf.SYSTEM_COMMITTER_ID:
            usernames.append('admin')
        elif settings.username:
            usernames.append(settings.username)
        else:
            usernames.append(
                '[Awaiting user registration: %s]' %
                settings.truncated_email)
    return usernames
def record_user_started_state_editor_tutorial(user_id):
    """Stamps the current UTC time as the moment the user last started
    the state editor tutorial.

    Args:
        user_id: str. The unique ID of the user.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.last_started_state_editor_tutorial = datetime.datetime.utcnow()
    _save_user_settings(settings)
def record_user_started_state_translation_tutorial(user_id):
    """Stamps the current UTC time as the moment the user last started
    the state translation tutorial.

    Args:
        user_id: str. The unique ID of the user.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.last_started_state_translation_tutorial = (
        datetime.datetime.utcnow())
    _save_user_settings(settings)
def record_user_logged_in(user_id):
    """Stamps the current UTC time as the user's most recent login.

    Args:
        user_id: str. The unique ID of the user.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.last_logged_in = datetime.datetime.utcnow()
    _save_user_settings(settings)
def update_last_logged_in(user_settings, new_last_logged_in):
    """Overwrites the last_logged_in timestamp on the given settings
    object and persists it. Should only be used by tests.

    Args:
        user_settings: UserSettings. The UserSettings domain object.
        new_last_logged_in: datetime or None. The value to record as the
            time of the last login session.
    """
    user_settings.last_logged_in = new_last_logged_in
    _save_user_settings(user_settings)
def record_user_edited_an_exploration(user_id):
    """Stamps the current UTC time as the moment the user last edited an
    exploration. Does nothing if the user has no settings model.

    Args:
        user_id: str. The unique ID of the user.
    """
    settings = get_user_settings(user_id)
    if settings:
        settings.last_edited_an_exploration = datetime.datetime.utcnow()
        _save_user_settings(settings)
def record_user_created_an_exploration(user_id):
    """Stamps the current UTC time as the moment the user last created an
    exploration. Does nothing if the user has no settings model.

    Args:
        user_id: str. The unique ID of the user.
    """
    settings = get_user_settings(user_id)
    if settings:
        settings.last_created_an_exploration = datetime.datetime.utcnow()
        _save_user_settings(settings)
def update_email_preferences(
        user_id, can_receive_email_updates, can_receive_editor_role_email,
        can_receive_feedback_email, can_receive_subscription_email):
    """Stores the user's email-notification choices. A new
    UserEmailPreferencesModel is created for the user if none exists yet.

    Args:
        user_id: str. The unique ID of the user.
        can_receive_email_updates: bool. Whether the user wants general
            site update emails.
        can_receive_editor_role_email: bool. Whether the user wants emails
            notifying them of role changes.
        can_receive_feedback_email: bool. Whether the user wants emails
            when feedback is submitted on their explorations.
        can_receive_subscription_email: bool. Whether the user wants emails
            related to their creator subscriptions.
    """
    preferences_model = user_models.UserEmailPreferencesModel.get(
        user_id, strict=False)
    if preferences_model is None:
        preferences_model = user_models.UserEmailPreferencesModel(id=user_id)

    preferences_model.site_updates = can_receive_email_updates
    preferences_model.editor_role_notifications = (
        can_receive_editor_role_email)
    preferences_model.feedback_message_notifications = (
        can_receive_feedback_email)
    preferences_model.subscription_notifications = (
        can_receive_subscription_email)
    preferences_model.put()
def get_email_preferences(user_id):
    """Returns the email preferences of the user with the given user_id.

    Args:
        user_id: str. The unique ID of the user.

    Returns:
        UserGlobalPrefs. The user's stored email preferences, or the
        default preferences if the user has none stored.
    """
    model = user_models.UserEmailPreferencesModel.get(user_id, strict=False)
    if model is None:
        return user_domain.UserGlobalPrefs.create_default_prefs()
    return user_domain.UserGlobalPrefs(
        model.site_updates,
        model.editor_role_notifications,
        model.feedback_message_notifications,
        model.subscription_notifications)
def flush_migration_bot_contributions_model():
    """Clears the created/edited exploration lists recorded against the
    migration bot, if a contributions model exists for it.
    """
    bot_contributions = get_user_contributions(
        feconf.MIGRATION_BOT_USER_ID, strict=False)
    if bot_contributions is None:
        return
    bot_contributions.edited_exploration_ids = []
    bot_contributions.created_exploration_ids = []
    _save_user_contributions(bot_contributions)
def get_users_email_preferences(user_ids):
    """Returns the email preferences for each of the given users.

    Args:
        user_ids: list(str). A list of user IDs for whom we want to get
            email preferences.

    Returns:
        list(UserGlobalPrefs). One entry per user id, in order; users with
        no stored preferences get the default preferences.
    """
    preference_models = (
        user_models.UserEmailPreferencesModel.get_multi(user_ids))
    return [
        user_domain.UserGlobalPrefs.create_default_prefs()
        if model is None else user_domain.UserGlobalPrefs(
            model.site_updates,
            model.editor_role_notifications,
            model.feedback_message_notifications,
            model.subscription_notifications)
        for model in preference_models]
def set_email_preferences_for_exploration(
        user_id, exploration_id, mute_feedback_notifications=None,
        mute_suggestion_notifications=None):
    """Stores the user's mute preferences for a single exploration. A new
    ExplorationUserDataModel is created for this user/exploration pair if
    none exists yet. A preference that is passed as None is left unchanged.

    Args:
        user_id: str. The unique ID of the user.
        exploration_id: str. The exploration id.
        mute_feedback_notifications: bool|None. Whether the user has muted
            feedback emails for this exploration. Defaults to None.
        mute_suggestion_notifications: bool|None. Whether the user has
            muted suggestion emails for this exploration. Defaults to None.
    """
    model = user_models.ExplorationUserDataModel.get(user_id, exploration_id)
    if model is None:
        model = user_models.ExplorationUserDataModel.create(
            user_id, exploration_id)

    if mute_feedback_notifications is not None:
        model.mute_feedback_notifications = mute_feedback_notifications
    if mute_suggestion_notifications is not None:
        model.mute_suggestion_notifications = mute_suggestion_notifications
    model.put()
def get_email_preferences_for_exploration(user_id, exploration_id):
    """Returns the user's mute preferences for a single exploration.

    Args:
        user_id: str. The unique ID of the user.
        exploration_id: str. The exploration id.

    Returns:
        UserExplorationPrefs. The user's stored preferences for this
        exploration, or the default preferences if none are stored.
    """
    model = user_models.ExplorationUserDataModel.get(user_id, exploration_id)
    if model is None:
        return user_domain.UserExplorationPrefs.create_default_prefs()
    return user_domain.UserExplorationPrefs(
        model.mute_feedback_notifications,
        model.mute_suggestion_notifications)
def get_users_email_preferences_for_exploration(user_ids, exploration_id):
    """Returns each user's mute preferences for a single exploration.

    Args:
        user_ids: list(str). A list of user IDs for whom we want to get
            email preferences.
        exploration_id: str. The exploration id.

    Returns:
        list(UserExplorationPrefs). One entry per user id, in order; users
        with no stored preferences get the default preferences.
    """
    data_models = user_models.ExplorationUserDataModel.get_multi(
        user_ids, exploration_id)
    return [
        user_domain.UserExplorationPrefs.create_default_prefs()
        if model is None else user_domain.UserExplorationPrefs(
            model.mute_feedback_notifications,
            model.mute_suggestion_notifications)
        for model in data_models]
class UserContributions(python_utils.OBJECT):
    """Value object representing a user's contributions.

    Attributes:
        user_id: str. The unique ID of the user.
        created_exploration_ids: list(str). IDs of explorations that this
            user has created.
        edited_exploration_ids: list(str). IDs of explorations that this
            user has edited.
    """

    def __init__(
            self, user_id, created_exploration_ids, edited_exploration_ids):
        """Constructs a UserContributions domain object.

        Args:
            user_id: str. The unique ID of the user.
            created_exploration_ids: list(str). IDs of explorations that
                this user has created.
            edited_exploration_ids: list(str). IDs of explorations that
                this user has edited.
        """
        self.user_id = user_id
        self.created_exploration_ids = created_exploration_ids
        self.edited_exploration_ids = edited_exploration_ids

    def validate(self):
        """Validates the user_id, created_exploration_ids and
        edited_exploration_ids fields of this domain object.

        Raises:
            ValidationError: user_id is missing or is not a string, or
                either exploration-id field is not a list of strings.
        """
        if not isinstance(self.user_id, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected user_id to be a string, received %s' % self.user_id)
        if not self.user_id:
            raise utils.ValidationError('No user id specified.')

        if not isinstance(self.created_exploration_ids, list):
            raise utils.ValidationError(
                'Expected created_exploration_ids to be a list, received %s'
                % self.created_exploration_ids)
        for exp_id in self.created_exploration_ids:
            if not isinstance(exp_id, python_utils.BASESTRING):
                raise utils.ValidationError(
                    'Expected exploration_id in created_exploration_ids '
                    'to be a string, received %s' % (exp_id))

        if not isinstance(self.edited_exploration_ids, list):
            raise utils.ValidationError(
                'Expected edited_exploration_ids to be a list, received %s'
                % self.edited_exploration_ids)
        for exp_id in self.edited_exploration_ids:
            if not isinstance(exp_id, python_utils.BASESTRING):
                raise utils.ValidationError(
                    'Expected exploration_id in edited_exploration_ids '
                    'to be a string, received %s' % (exp_id))
def get_user_contributions(user_id, strict=False):
    """Returns the domain object describing the given user's contributions.

    Args:
        user_id: str. The unique ID of the user.
        strict: bool. Whether to fail noisily if no user with the given id
            exists in the datastore. Defaults to False.

    Returns:
        UserContributions or None. None if no contributions model exists
        for the given user_id (and strict is False); otherwise the
        corresponding UserContributions domain object.
    """
    model = user_models.UserContributionsModel.get(user_id, strict=strict)
    if model is None:
        return None
    return UserContributions(
        model.id, model.created_exploration_ids,
        model.edited_exploration_ids)
def create_user_contributions(
        user_id, created_exploration_ids, edited_exploration_ids):
    """Creates and persists a new UserContributions domain object.

    Args:
        user_id: str. The unique ID of the user.
        created_exploration_ids: list(str). IDs of explorations that this
            user has created.
        edited_exploration_ids: list(str). IDs of explorations that this
            user has edited.

    Returns:
        UserContributions. The newly-created domain object.

    Raises:
        Exception: A UserContributionsModel for the given user_id already
            exists.
    """
    # Guard clause: creation must not clobber an existing model.
    if get_user_contributions(user_id, strict=False):
        raise Exception(
            'User contributions model for user %s already exists.' % user_id)
    user_contributions = UserContributions(
        user_id, created_exploration_ids, edited_exploration_ids)
    _save_user_contributions(user_contributions)
    return user_contributions
def update_user_contributions(
        user_id, created_exploration_ids, edited_exploration_ids):
    """Overwrites the stored contribution lists for an existing user.

    Args:
        user_id: str. The unique ID of the user.
        created_exploration_ids: list(str). IDs of explorations that this
            user has created.
        edited_exploration_ids: list(str). IDs of explorations that this
            user has edited.

    Raises:
        Exception: No UserContributionsModel exists for the given user_id.
    """
    user_contributions = get_user_contributions(user_id, strict=False)
    if not user_contributions:
        raise Exception(
            'User contributions model for user %s does not exist.' % user_id)

    user_contributions.created_exploration_ids = created_exploration_ids
    user_contributions.edited_exploration_ids = edited_exploration_ids
    _save_user_contributions(user_contributions)
def add_created_exploration_id(user_id, exploration_id):
    """Records that the user created the given exploration, creating a
    contributions model for the user if needed. The stored list stays
    sorted and duplicate ids are ignored.

    Args:
        user_id: str. The unique ID of the user.
        exploration_id: str. The exploration id.
    """
    contributions = get_user_contributions(user_id, strict=False)
    if not contributions:
        create_user_contributions(user_id, [exploration_id], [])
    elif exploration_id not in contributions.created_exploration_ids:
        contributions.created_exploration_ids.append(exploration_id)
        contributions.created_exploration_ids.sort()
        _save_user_contributions(contributions)
def add_edited_exploration_id(user_id, exploration_id):
    """Records that the user edited the given exploration, creating a
    contributions model for the user if needed. The stored list stays
    sorted and duplicate ids are ignored.

    Args:
        user_id: str. The unique ID of the user.
        exploration_id: str. The exploration id.
    """
    contributions = get_user_contributions(user_id, strict=False)
    if not contributions:
        create_user_contributions(user_id, [], [exploration_id])
    elif exploration_id not in contributions.edited_exploration_ids:
        contributions.edited_exploration_ids.append(exploration_id)
        contributions.edited_exploration_ids.sort()
        _save_user_contributions(contributions)
def _save_user_contributions(user_contributions):
    """Validates and commits a user contributions object to the datastore.

    Args:
        user_contributions: UserContributions. Value object representing
            a user's contributions.
    """
    user_contributions.validate()
    model = user_models.UserContributionsModel(
        id=user_contributions.user_id,
        created_exploration_ids=user_contributions.created_exploration_ids,
        edited_exploration_ids=user_contributions.edited_exploration_ids)
    model.put()
def _migrate_dashboard_stats_to_latest_schema(versioned_dashboard_stats):
    """Brings the structure of the given dashboard stats up to the latest
    schema version.

    Args:
        versioned_dashboard_stats: UserStatsModel. Value object
            representing user-specific statistics.

    Raises:
        Exception: The stored schema version is outside the supported
            range [1, CURRENT_DASHBOARD_STATS_SCHEMA_VERSION].
    """
    schema_version = versioned_dashboard_stats.schema_version
    if (schema_version < 1 or
            schema_version > feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION):
        raise Exception(
            'Sorry, we can only process v1-v%d dashboard stats schemas at '
            'present.' % feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION)
def get_current_date_as_string():
    """Returns today's UTC date formatted for dashboard-stats keys.

    Returns:
        str. The current date as a string of format 'YYYY-MM-DD'.
    """
    now = datetime.datetime.utcnow()
    return now.strftime(feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT)
def parse_date_from_string(datetime_str):
    """Splits a dashboard-stats date string into its calendar components.

    Args:
        datetime_str: str. A date string in the dashboard-stats format.

    Returns:
        dict. A dict with 'year', 'month' and 'day' integer keys.
    """
    parsed = datetime.datetime.strptime(
        datetime_str, feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT)
    return {'year': parsed.year, 'month': parsed.month, 'day': parsed.day}
def get_user_impact_score(user_id):
    """Returns the impact score recorded for the given user.

    Args:
        user_id: str. The unique ID of the user.

    Returns:
        float. The user's impact score, or 0 if no UserStatsModel exists
        for the given user_id.
    """
    model = user_models.UserStatsModel.get(user_id, strict=False)
    return model.impact_score if model else 0
def get_weekly_dashboard_stats(user_id):
    """Returns the weekly dashboard stats recorded for the given user.

    Args:
        user_id: str. The unique ID of the user.

    Returns:
        list(dict) or None. A list of single-entry dicts, each keyed by a
        datetime string and mapping to that week's stats:

            [
                {
                    {{datetime_string}}: {
                        'num_ratings': (value),
                        'average_ratings': (value),
                        'total_plays': (value)
                    }
                },
                ...
            ]

        Returns None if the user has no stats model or no recorded weekly
        stats.
    """
    model = user_models.UserStatsModel.get(user_id, strict=False)
    if model is None or not model.weekly_creator_stats_list:
        return None
    return model.weekly_creator_stats_list
def get_last_week_dashboard_stats(user_id):
    """Returns the most recent weekly dashboard stats entry for the user.

    Args:
        user_id: str. The unique ID of the user.

    Returns:
        dict or None. A single-entry dict keyed by a datetime string whose
        value has the shape:

            {
                'num_ratings': (value),
                'average_ratings': (value),
                'total_plays': (value)
            }

        Returns None if the user has no recorded weekly stats.
    """
    stats_list = get_weekly_dashboard_stats(user_id)
    return stats_list[-1] if stats_list else None
def update_dashboard_stats_log(user_id):
    """Appends a snapshot of the user's current creator-dashboard stats to
    their weekly stats log, keyed by today's date string.

    Args:
        user_id: str. The unique ID of the user.
    """
    model = user_models.UserStatsModel.get_or_create(user_id)

    # Older models are migrated before a new entry is appended.
    if model.schema_version != feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION:
        _migrate_dashboard_stats_to_latest_schema(model)

    snapshot = {
        get_current_date_as_string(): {
            # num_ratings and total_plays may be unset; default them to 0.
            'num_ratings': model.num_ratings or 0,
            'average_ratings': model.average_ratings,
            'total_plays': model.total_plays or 0
        }
    }
    model.weekly_creator_stats_list.append(snapshot)
    model.put()
def is_at_least_moderator(user_id):
    """Checks if a user with given user_id is at least a moderator.

    Args:
        user_id: str. The unique ID of the user.

    Returns:
        bool. True if user is at least a moderator, False otherwise.
    """
    user_role = get_user_role_from_id(user_id)
    # Admins implicitly count as moderators, so both roles qualify.
    return user_role in (feconf.ROLE_ID_MODERATOR, feconf.ROLE_ID_ADMIN)
def is_admin(user_id):
    """Checks if a user with given user_id is an admin.

    Args:
        user_id: str. The unique ID of the user.

    Returns:
        bool. True if user is an admin, False otherwise.
    """
    # Return the comparison directly instead of branching to True/False.
    return get_user_role_from_id(user_id) == feconf.ROLE_ID_ADMIN
def is_topic_manager(user_id):
    """Checks if a user with given user_id is a topic manager.

    Args:
        user_id: str. The unique ID of the user.

    Returns:
        bool. Whether the user is a topic manager.
    """
    # Return the comparison directly instead of branching to True/False.
    return get_user_role_from_id(user_id) == feconf.ROLE_ID_TOPIC_MANAGER
def can_review_translation_suggestions(user_id, language_code=None):
    """Returns whether the user can review translation suggestions.

    NOTE: With a language_code, this checks review rights for that specific
    language; without one, it checks for review rights in any language.

    Args:
        user_id: str. The unique ID of the user.
        language_code: str|None. The code of the language, or None to check
            for rights in any language.

    Returns:
        bool. Whether the user can review translation suggestions in the
        given language (or, if language_code is None, in any language).
    """
    rights = get_user_community_rights(user_id)
    reviewable_codes = rights.can_review_translation_for_language_codes
    if language_code is None:
        return bool(reviewable_codes)
    return language_code in reviewable_codes
def can_review_voiceover_applications(user_id, language_code=None):
    """Returns whether the user can review voiceover applications.

    NOTE: With a language_code, this checks review rights for that specific
    language; without one, it checks for review rights in any language.

    Args:
        user_id: str. The unique ID of the user.
        language_code: str|None. The code of the language, or None to check
            for rights in any language.

    Returns:
        bool. Whether the user can review voiceover applications in the
        given language (or, if language_code is None, in any language).
    """
    rights = get_user_community_rights(user_id)
    reviewable_codes = rights.can_review_voiceover_for_language_codes
    if language_code is None:
        return bool(reviewable_codes)
    return language_code in reviewable_codes
def can_review_question_suggestions(user_id):
    """Checks whether the user can review question suggestions.

    Args:
        user_id: str. The unique ID of the user.

    Returns:
        bool. Whether the user can review question suggestions.
    """
    return get_user_community_rights(user_id).can_review_questions
def allow_user_to_review_translation_in_language(user_id, language_code):
    """Allows the user with the given user id to review translation in the
    given language_code.

    Args:
        user_id: str. The unique ID of the user.
        language_code: str. The code of the language. Callers should ensure
            that the user does not have rights to review translations in
            the given language code.
    """
    user_community_rights = get_user_community_rights(user_id)
    # A set is used so that adding an already-present code is a no-op.
    allowed_language_codes = set(
        user_community_rights.can_review_translation_for_language_codes)
    allowed_language_codes.add(language_code)
    # sorted() already returns a list, so the intermediate list() call that
    # used to wrap this expression was redundant.
    user_community_rights.can_review_translation_for_language_codes = sorted(
        allowed_language_codes)
    _save_user_community_rights(user_community_rights)
def remove_translation_review_rights_in_language(user_id, language_code):
    """Revokes the user's right to review translation suggestions in the
    given language_code.

    Args:
        user_id: str. The unique ID of the user.
        language_code: str. The code of the language. Callers should ensure
            that the user already has rights to review translations in the
            given language code.
    """
    rights = get_user_community_rights(user_id)
    rights.can_review_translation_for_language_codes.remove(language_code)
    _update_user_community_rights(rights)
def allow_user_to_review_voiceover_in_language(user_id, language_code):
    """Allows the user with the given user id to review voiceover
    applications in the given language_code.

    Args:
        user_id: str. The unique ID of the user.
        language_code: str. The code of the language. Callers should ensure
            that the user does not have rights to review voiceovers in the
            given language code.
    """
    user_community_rights = get_user_community_rights(user_id)
    # A set is used so that adding an already-present code is a no-op.
    allowed_language_codes = set(
        user_community_rights.can_review_voiceover_for_language_codes)
    allowed_language_codes.add(language_code)
    # sorted() already returns a list, so the intermediate list() call that
    # used to wrap this expression was redundant.
    user_community_rights.can_review_voiceover_for_language_codes = sorted(
        allowed_language_codes)
    _save_user_community_rights(user_community_rights)
def remove_voiceover_review_rights_in_language(user_id, language_code):
    """Revokes the user's right to review voiceover applications in the
    given language_code.

    Args:
        user_id: str. The unique ID of the user.
        language_code: str. The code of the language. Callers should ensure
            that the user already has rights to review voiceovers in the
            given language code.
    """
    rights = get_user_community_rights(user_id)
    rights.can_review_voiceover_for_language_codes.remove(language_code)
    _update_user_community_rights(rights)
def allow_user_to_review_question(user_id):
    """Grants the given user the right to review question suggestions.

    Args:
        user_id: str. The unique ID of the user. Callers should ensure
            that the given user does not have rights to review questions.
    """
    rights = get_user_community_rights(user_id)
    rights.can_review_questions = True
    _save_user_community_rights(rights)
def remove_question_review_rights(user_id):
    """Revokes the given user's right to review question suggestions.

    Args:
        user_id: str. The unique ID of the user. Callers should ensure
            that the given user already has rights to review questions.
    """
    rights = get_user_community_rights(user_id)
    rights.can_review_questions = False
    _update_user_community_rights(rights)
def remove_community_reviewer(user_id):
    """Deletes the UserCommunityRightsModel for the given user_id, if one
    exists.

    Args:
        user_id: str. The unique ID of the user.
    """
    rights_model = user_models.UserCommunityRightsModel.get_by_id(user_id)
    if rights_model is not None:
        rights_model.delete()
def get_community_reviewer_usernames(review_category, language_code=None):
    """Returns the usernames of users who can review items in the given
    review category.

    Args:
        review_category: str. The review category to find the list of
            reviewers for.
        language_code: None|str. The language code for the translation or
            voiceover review category; must be None for the question
            category.

    Returns:
        list(str). A list of usernames.

    Raises:
        Exception: The review category is invalid, or a language_code was
            supplied for the question category.
    """
    if review_category == constants.REVIEW_CATEGORY_TRANSLATION:
        reviewer_ids = (
            user_models.UserCommunityRightsModel
            .get_translation_reviewer_user_ids(language_code))
    elif review_category == constants.REVIEW_CATEGORY_VOICEOVER:
        reviewer_ids = (
            user_models.UserCommunityRightsModel
            .get_voiceover_reviewer_user_ids(language_code))
    elif review_category == constants.REVIEW_CATEGORY_QUESTION:
        # Question review rights are not language-specific.
        if language_code is not None:
            raise Exception('Expected language_code to be None, found: %s' % (
                language_code))
        reviewer_ids = (
            user_models.UserCommunityRightsModel
            .get_question_reviewer_user_ids())
    else:
        raise Exception('Invalid review category: %s' % review_category)

    return get_usernames(reviewer_ids)
def log_username_change(committer_id, old_username, new_username):
    """Records a username change in the UsernameChangeAuditModel.

    Args:
        committer_id: str. The ID of the user that is making the change.
        old_username: str. The current username that is being changed.
        new_username: str. The new username that the current one is being
            changed to.
    """
    # The audit id combines the committer with the time of the change.
    model_id = '%s.%d' % (
        committer_id, utils.get_current_time_in_millisecs())
    audit_models.UsernameChangeAuditModel(
        id=model_id, committer_id=committer_id,
        old_username=old_username, new_username=new_username).put()
| 39.075379 | 5,138 | 0.710539 |
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import hashlib
import imghdr
import logging
import re
from constants import constants
from core.domain import role_services
from core.domain import user_domain
from core.platform import models
import feconf
import python_utils
import utils
from google.appengine.api import urlfetch
current_user_services = models.Registry.import_current_user_services()
(user_models, audit_models) = models.Registry.import_models(
[models.NAMES.user, models.NAMES.audit])
GRAVATAR_SIZE_PX = 150
DEFAULT_IDENTICON_DATA_URL = (
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEwAAABMCAYAAADHl1ErAAAAAXNSR0IArs4c6QAADhtJREFUeAHtXHlwVdUZ/859jyxmIQESyCaglC0iAgkJIntrIpvKphSwY2ttxbFOp9R/cGGqdhykLaMVO2OtoyRSCEKNEpYKyBIVQ1iNkBhNMCtb8shiQpJ3b7/fTW7m5uUlecu9L4nTM5Pce8895zvf93vnnPud833fEdQLKXb5jsC6%2BuZERZbHKaSMYRbGKERxgpQQUkSIIigEbAmFavlfrUKiVhCVcFa%2BIJEvJOlCcNCAnNKMFQ0o58vEfPgmhS5Mn0ot8n2KIs8lIZJJUfy8almIJqbxhRDSIbJKe2s%2BXvWlV/RcrGwqYGGp20bI1LyaeVmjKMrodp4EycGBAy6MjgsrSxozqG7O5GgxcVREeEigNDAwwBpmsUiRKGu3y1caGltstQ3yjbOFV6sPnypXTuRXBReU2GLqGprHkUKSRlMIUcD3WyUakGbbt7JYyzf6agpgYfe9O8kui/U8nB7UhJIkUTljwrBTTz449mZKUlyCEBTnjTCKQiX7T5ScfGP3Rf9j5ysny7IyTKXHPwYP690WSXnZtvcXp71pw1ldQwELm59%2BlyzbX%2BbeNL%2Btscb4EYOyNz2ZWD99wtAFnGdxxoQBefbs85f3rHsjJyivuGo60wsATe51WZJkWW/LWnXGgDZUEoYAFr58x0B7beOLPHGv5XnFIpGoS0mKOfze%2Bpmj/f2smNR9lm42teQ/8vLRgv0nyuZwVwtm1Ows5BZLSMBz1RkrbnjLiNeAhaWmPWgn%2BxYeejwkRMu9idH7tm%2BYE8/z0EhvmfOmPs9/RQ9tOJx3IKc8lUixkqBKC1nW2vat3u0NXY8Bi1%2B%2Bw6%2BktnETD7%2BnwEB4iP/pL/5xf03U4IBZ3jBkdN2K641Hkn/7YWh17c1JoM3D9PW4kIB1eRkrmjxpyyPAeK4aLttbPuAhOIU5aHpm1cTMZ1ffuRT8eMKED%2BooL6Wd%2B2Bj%2BtnFUGeYyVzJYl3Kc9sld9t2W8Dw%2BWkTWuz2fdxQ9ACr9P3Jfy7%2BZuSw0HnuNtwb5Ysqaw4mPJb5k%2BYW%2BVZuv9xqsaRWZ60%2B7w4vbgEWnrJ1hp3kTO5ZYUPCAnK%2B3bYiitWDWHca7O2yrI6U3r5yR8U1W2MiC2%2BzkLS4ev%2BaY67y1a749VQBYLUIZT/AGhUTduS7f68Y39/AgozgGbxDBsgCmSBbT/Jr710CDMMQPYvHf2DC2Mj9p95efA8TCNKI9MNrEGSALJAJskFGV%2BTocUhigrfbWz5jYtH4VdrAMksBdYVnI8vYJ/8q83hhmW0WEy23WKx39/Qh6LaHQXXA1xBgYc5isBL4/scCFoC3QCbIBhkhK2TGi65St4CpeharDvgaYoJnIv15GHaFQRBkg4w8p02BzF0VRH6XgEGDV5VS1rOgOvTHCb47wfXvIBtkhE4JmSG7/r3%2B3ilg6toQyx1OUEr7i56lF8zde8gIWVEPSz1g4IyGU8CwkMbaEMudNg3eWd0fXR5khcyQXcXAiYSdAMMWDY/ltVhIY23IdXr8kjqh21%2BzRKvMogUYAAtHQToBhv0sbNFg16GvLaQdmTfjGTJDdmCgYuHQSIfe07pTSqewn3V9z6qrvb1F48Crzx6xNTR4QXoE9tN4c2%2ByfufWqudC3VbmAYzNPwZrkf6dL%2B4LSm5Q9vkrVH79B6qs%2BoH8B1goatAtNCIqmOZOiabw4G5VJMNYREdhDD7ae6J0USsmtEwj3t7DYLCwK83f8WbbzauZP7/kq53SxiY7vfmfC5R24Fv6prTrDVEWgqbfEUlPLY2nlKkxGv%2BmXbFzG7H4/eE8g/tZyO92zbDSPoe1WncUgT14X4G189NimvjobnrhX6e6BQuo8DCho2c
rafnzB2n%2BMwe4PL5H5iVgACx4wEltli%2B1sXbA%2BGkNcmCwUN%2BY%2BI%2B3WOjZt3Lpl68cpQoefu6m4%2Bcqae7TWfTfk%2BXuVnWrvA4LFRtUVockjKxKc8sJmMJsWWsiON/U9eJvNmXTtk%2B%2BdYt5Z4WZX0p/bjYtmBbn7LURefaw%2BVuvwoQnBliTYCxu7WFskQb1WROjcvliKlibM/IMAQv8siD0643H6etiGx7NSBbYUlXCbRipgKnme859Ysl4jwwDrnKaV2SjDe%2B0tu9qnZ7KsQWch/YxVpt6KunZexieUVPDSIJjCC86k3lwyikJ0di%2BMS09/3au2iuMbuDr4mpKN2CIO%2BMLVnpgA4yAlVRX1ziV4fODrwOv2k2bDM4UVvEkXeaMJ0PyXn3/nCF0HIkAE2ADjICVpChiLArBMcSxsJHPmdmXjCTXiVZRRS19VVTdKd%2BIDA0bYCW1%2BWcRvGiMIN4Vjb1flHb1yrD8rM9LDKOlJ6RhA6ww6au%2BD3A50hcy%2Bt5sRRP8FpSYo8zqsBnDPax13oJ/ltEgafSqam5SU7NdezTtWsHrTzOShg2wYtWP3SQ5wZnNjMZA80Z9s1mkO9CtMakdDRtgJcGnFK3C869D6wY%2BRISp7loGUnROKtKkdtqxYawkzQGXdwNUN0nnrHiXGxxoJf40e0fEhdpRg29xoZT7RTRsgJV%2B8e0%2BJTdqJIwd4kZpz4pOGWN%2BG5Lq2s38wQHXMzZdq2XiAlllgP2%2BaH6yOX4xGjbAinejlVq0CG9l10T3rNT99wwnf96KMyvNuHMoDR0UaAr5dmwYK1YrhAoYXLtNaa2N6DAW5vFF6qLClGZeeHSyKXRBVMMGWLFaoUZYEPzgTWuxjfC6lROI/RgMb2bZ7JGUaOIcqWEDrDDp50MCBA0YLokDQRgx0p%2BdTezH4PDG88dxI8LotaeneU7AhZo6bPK5hwkVMERYuFDX6yLT2JDx99/fTVY2anibYiOCaPuGuayydDB%2BeUu2U30NG2AlCaFcRAmEo3QqaVLGynm30a6X5sHz2uMWksZH0pHXF9CIYeb/zho2CAqTgoMDvoTXCmJ3EI7isQRuVpw9KYqytyykhxk8qASuJoD84mNTKGvjveSLFQQwUeOaGCNE0Flqvs5o8b/9gZ8xwyMmj404NComZJyrzHtbLjTIjxZNv1X9C/S30pXqRrLVdd4lh7EjOX4oPfHAOHrzD9Np9l1RZMHnygeJ45kOZXxaPJ6byr6WueotdfAjhI73rGdu2ZXnn5oY7QM2OjZxx8hw%2BvPjCepf2bUfqJz/Llc1qHpb1OBAiosMpoFB5i%2BtOnLV%2BoTgL9ypYYZ8bZ0tOd6QmuUNbCiFMoN9GPM0TCbeXYoZcgvhr48kOyLlVF6AESf1UwV7G88jBbC/ISqsjzDb62wAC9UmydhoAaz6b/tWcIgQul7ntI8woMNCxQZstQOGSFYeqQriDeGI0Ud47jU2gIEae8kmtlZsWllpB6zNO2UXZwcg3rDXOO0jDbdhEIDoXs1zB6y1A4YHhP3iiuBMOJXh3tfJzuZ/qBbfX65nR5UGqmto8TUL2OoqAgZoWMNEY6KTMhOa%2Bt4ehCDfmxjz8c4X5y3UChp5hVk/j63Vpwuu0zdlNVTIrkuFfC1hkOobO%2B//Qw8LD/an26JDaFRsKI2KCWU76kCaOi6CoHYYnZY9d/DjAzllC/lDmFWz75EFevqdFmGIkbbL9hREsiI40yg/11wGhxex9PlXV%2BjEhatUU99ZQdUzpr%2BH08n1mkb1L%2BfiVf0rGs5Lo2nxkXT3HUPZ0S7WawAhsxrFy6HPwKJDY/zQqYehAPey1%2BDgDxfsSxkPwZPYaTmU7S7BPWDXkWLafayYLlWaaidW2cASK5nBWzJzOD3AG5YebCgqw5dvP4PoXab1Oveu3znK5xQIOPW31DZchL/
6M6vv2sn%2B68scK3b1jDlo%2B6Hv6G878ij/e1M3cbtiQc3HML4vKZbWrbyTpowe3G1Z7SVH7e7cmHZmGXePSmtI4FhnQfVOAQMBNfhdse/CwvzsO/cf6ykapKlZpq0HCmlzxlc%2B6U2akK5c2XJNf3x4At3D29hdJUTrTnz0wxlwOrEIy5Kugum7BAyEtaGJwKVrH63mrSDn0besEdNTmz9XJ%2B6uGOoL%2BbAr/OXJJIoM77jryx%2Bh0iGL0mSENnc1FDX%2BO6gVWqZ2RfQ9I5oLQgj75fxO/q%2BvpJ9TnXTxlevr6cPjlyj5iUx2bb%2BsZ7UesqlgsayQWf/S8b7bHobC3QWYrv3rZ%2BwuXuhIs88/Y4v8vfWz4BvrdoBpj4BBejWE2W4/yupTGMJ%2BD21O/emf3j1t2bTNrYD8PgWkv7/FflvUwE8uFFelMAg2i8Uy05UTBlwCTAWtLUieJ8XA2MiQIxXX6xNYI%2B6XC3Wep%2Br5xz/Jsszij1qDVREprp4s4DJgGmjaMQzcUA5bgaNkRTbH3GxSf5SEVMoxRBUMlrnHMIB//ArounxbjgZZuWWtSzlokmyGkwWv4Bm8QwZ1GLpxZgUYcquHaRLgQ6A/SobJ4IiGpeyc7RE9ja55V/aKEOID5s/3R8loQjkeVsTzwmmeF2oYuFlamT5xFeII/4qh3LMmgR/oWT4/rEgPhONxWEKifUJW4mWikfpyvr5nBbNIkUQeD8BU7lm9fxyWHgDHA9fYQlzHg/0w/6qjuZzqdKwvb/J9PveiAl4Hz%2BE5q%2B8duKYXHjHSjkf6sXkqWyEZK4QFLIQ51iihWrr2CJKCeE6fzm2pax8Grm8e6acHDffth0YSLdF9CCoZvFye55okRU7gIetV1AkPuRJZSCfZUdefezJMYf3v0MhOwHVzLKlQxAWSRJlQlDr%2BzrPcUjjbGwbyBB2mCKH62/K7KwywjWM8b5CQq%2BH9x%2B%2BCSVZiFKH8eI4ldQQOz4jJ/P/Bt86QcSFPPVqZA50Qu4NwFK7i3tHK7HEEJ5reOFr5fwkK97jkk8ywAAAAAElFTkSuQmCC')
class UserSettings(python_utils.OBJECT):
    """Value object holding a user's settings and profile data.

    Mirrors the persisted fields of the user-settings storage model;
    list-valued arguments default to empty lists when not supplied.
    """

    def __init__(
            self, user_id, gae_id, email, role, username=None,
            last_agreed_to_terms=None, last_started_state_editor_tutorial=None,
            last_started_state_translation_tutorial=None, last_logged_in=None,
            last_created_an_exploration=None, last_edited_an_exploration=None,
            profile_picture_data_url=None, default_dashboard=None,
            creator_dashboard_display_pref=(
                constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['CARD']),
            user_bio='', subject_interests=None, first_contribution_msec=None,
            preferred_language_codes=None, preferred_site_language_code=None,
            preferred_audio_language_code=None, deleted=False):
        """Constructs a UserSettings domain object.

        Args:
            user_id: str. The unique id of the user.
            gae_id: str|None. The user's App Engine id.
            email: str. The user's email address.
            role: str. Role id of the user.
            username: str|None. Identifiable username for display.
            last_agreed_to_terms: datetime|None. When terms were accepted.
            last_started_state_editor_tutorial: datetime|None.
            last_started_state_translation_tutorial: datetime|None.
            last_logged_in: datetime|None. Last login time.
            last_created_an_exploration: datetime|None.
            last_edited_an_exploration: datetime|None.
            profile_picture_data_url: str|None. Profile picture data URL.
            default_dashboard: str|None. Default dashboard preference.
            creator_dashboard_display_pref: str. Card/list display preference.
            user_bio: str. User-specified biography.
            subject_interests: list(str)|None. Subject interests.
            first_contribution_msec: float|None. Time of first contribution.
            preferred_language_codes: list(str)|None. Exploration languages.
            preferred_site_language_code: str|None. Site UI language.
            preferred_audio_language_code: str|None. Audio language.
            deleted: bool. Whether the user is marked as deleted.
        """
        self.user_id = user_id
        self.gae_id = gae_id
        self.email = email
        self.role = role
        self.username = username
        self.last_agreed_to_terms = last_agreed_to_terms
        self.last_started_state_editor_tutorial = (
            last_started_state_editor_tutorial)
        self.last_started_state_translation_tutorial = (
            last_started_state_translation_tutorial)
        self.last_logged_in = last_logged_in
        self.last_edited_an_exploration = last_edited_an_exploration
        self.last_created_an_exploration = last_created_an_exploration
        self.profile_picture_data_url = profile_picture_data_url
        self.default_dashboard = default_dashboard
        self.creator_dashboard_display_pref = creator_dashboard_display_pref
        self.user_bio = user_bio
        # Falsy (None) list args are normalized to fresh empty lists.
        self.subject_interests = (
            subject_interests if subject_interests else [])
        self.first_contribution_msec = first_contribution_msec
        self.preferred_language_codes = (
            preferred_language_codes if preferred_language_codes else [])
        self.preferred_site_language_code = preferred_site_language_code
        self.preferred_audio_language_code = preferred_audio_language_code
        self.deleted = deleted

    def validate(self):
        """Checks user_id, gae_id, email, role and dashboard display pref.

        Raises:
            ValidationError. One of the attributes is missing or malformed.
        """
        if not isinstance(self.user_id, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected user_id to be a string, received %s' % self.user_id)
        if not self.user_id:
            raise utils.ValidationError('No user id specified.')
        if (self.gae_id is not None and
                not isinstance(self.gae_id, python_utils.BASESTRING)):
            raise utils.ValidationError(
                'Expected gae_id to be a string, received %s' %
                self.gae_id
            )
        if not isinstance(self.email, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected email to be a string, received %s' % self.email)
        if not self.email:
            raise utils.ValidationError('No user email specified.')
        # Lightweight sanity check only: requires an interior '@'.
        if ('@' not in self.email or self.email.startswith('@')
                or self.email.endswith('@')):
            raise utils.ValidationError(
                'Invalid email address: %s' % self.email)
        if not isinstance(self.role, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected role to be a string, received %s' % self.role)
        if self.role not in role_services.PARENT_ROLES:
            raise utils.ValidationError('Role %s does not exist.' % self.role)
        if not isinstance(
                self.creator_dashboard_display_pref, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected dashboard display preference to be a string, '
                'received %s' % self.creator_dashboard_display_pref)
        if (self.creator_dashboard_display_pref not in
                list(constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS.values(
                    ))):
            raise utils.ValidationError(
                '%s is not a valid value for the dashboard display '
                'preferences.' % (self.creator_dashboard_display_pref))

    @property
    def truncated_email(self):
        """Returns an obfuscated email, e.g. 'ali..@example.com'.

        At most the last three characters of the local part are masked.
        """
        first_part = self.email[: self.email.find('@')]
        last_part = self.email[self.email.find('@'):]
        if len(first_part) <= 1:
            first_part = '..'
        elif len(first_part) <= 3:
            first_part = '%s..' % first_part[0]
        else:
            first_part = first_part[:-3] + '..'
        return '%s%s' % (first_part, last_part)

    @property
    def normalized_username(self):
        """str|None. The lowercased username, or None when unset."""
        return self.normalize_username(self.username)

    @classmethod
    def normalize_username(cls, username):
        """Lowercases the username; passes falsy values through as None."""
        return username.lower() if username else None

    @classmethod
    def require_valid_username(cls, username):
        """Raises ValidationError unless the username is acceptable.

        Rejects empty, over-long, non-alphanumeric, and reserved usernames.
        """
        if not username:
            raise utils.ValidationError('Empty username supplied.')
        elif len(username) > constants.MAX_USERNAME_LENGTH:
            raise utils.ValidationError(
                'A username can have at most %s characters.'
                % constants.MAX_USERNAME_LENGTH)
        elif not re.match(feconf.ALPHANUMERIC_REGEX, username):
            raise utils.ValidationError(
                'Usernames can only have alphanumeric characters.')
        else:
            # Substring match: any username *containing* a reserved name
            # (e.g. 'myadmin1') is rejected, not just exact matches.
            reserved_usernames = set(feconf.SYSTEM_USERS.values()) | set([
                'admin', 'oppia'])
            for reserved_username in reserved_usernames:
                if reserved_username in username.lower().strip():
                    raise utils.ValidationError(
                        'This username is not available.')
def is_user_id_correct(user_id):
    """Checks that a user id is lowercase, 'uid_'-prefixed and of the
    expected length.

    Args:
        user_id: str. The id to verify.

    Returns:
        bool. Whether the id has the canonical format.
    """
    return (
        user_id.islower() and
        user_id.startswith('uid_') and
        len(user_id) == user_models.USER_ID_LENGTH)
def is_username_taken(username):
    """Returns whether the (normalized) username is already in use."""
    normalized = UserSettings.normalize_username(username)
    return user_models.UserSettingsModel.is_normalized_username_taken(
        normalized)
def get_email_from_user_id(user_id):
    """Returns the email address stored in the user's settings."""
    return get_user_settings(user_id).email
def get_email_from_username(username):
    """Looks up the email for a username; None when no such user exists."""
    model = user_models.UserSettingsModel.get_by_normalized_username(
        UserSettings.normalize_username(username))
    return None if model is None else model.email
def get_user_id_from_username(username):
    """Looks up the user id for a username; None when no such user exists."""
    model = user_models.UserSettingsModel.get_by_normalized_username(
        UserSettings.normalize_username(username))
    return None if model is None else model.id
def get_user_settings_from_username(username):
    """Returns the UserSettings for a username, or None if unknown."""
    model = user_models.UserSettingsModel.get_by_normalized_username(
        UserSettings.normalize_username(username))
    return None if model is None else get_user_settings(model.id)
def get_users_settings(user_ids):
    """Fetches UserSettings objects for the given ids, in order.

    The system committer id is answered with a synthetic admin settings
    object rather than a datastore lookup; unknown ids yield None.

    Args:
        user_ids: list(str). Ids of the users to look up.

    Returns:
        list(UserSettings|None). One entry per requested id.
    """
    models = user_models.UserSettingsModel.get_multi(user_ids)
    settings_list = []
    for uid, model in zip(user_ids, models):
        if uid == feconf.SYSTEM_COMMITTER_ID:
            settings_list.append(UserSettings(
                user_id=feconf.SYSTEM_COMMITTER_ID,
                gae_id=feconf.SYSTEM_COMMITTER_ID,
                email=feconf.SYSTEM_EMAIL_ADDRESS,
                role=feconf.ROLE_ID_ADMIN,
                username='admin',
                last_agreed_to_terms=datetime.datetime.utcnow()
            ))
        else:
            settings_list.append(_transform_user_settings(model))
    return settings_list
def generate_initial_profile_picture(user_id):
    """Fetches the user's Gravatar and stores it as their profile picture."""
    gravatar_data_url = fetch_gravatar(get_email_from_user_id(user_id))
    update_profile_picture_data_url(user_id, gravatar_data_url)
def get_gravatar_url(email):
    """Builds the Gravatar URL (identicon fallback) for an email address."""
    email_hash = hashlib.md5(email).hexdigest()
    return (
        'https://www.gravatar.com/avatar/%s?d=identicon&s=%s' %
        (email_hash, GRAVATAR_SIZE_PX))
def fetch_gravatar(email):
    """Fetches the user's Gravatar as a PNG data URL.

    Falls back to DEFAULT_IDENTICON_DATA_URL when the fetch fails, the
    response is not a 200, or the payload is not a PNG.

    Args:
        email: str. The email whose Gravatar should be fetched.

    Returns:
        str. A data URL containing the avatar image.
    """
    gravatar_url = get_gravatar_url(email)
    try:
        result = urlfetch.fetch(
            gravatar_url,
            headers={'Content-Type': 'image/png'},
            follow_redirects=False)
    except (urlfetch.InvalidURLError, urlfetch.DownloadError):
        logging.error('Failed to fetch Gravatar from %s' % gravatar_url)
    else:
        if result.status_code == 200:
            # Only trust payloads that imghdr actually recognizes as PNG.
            if imghdr.what(None, h=result.content) == 'png':
                return utils.convert_png_binary_to_data_url(result.content)
        else:
            logging.error(
                '[Status %s] Failed to fetch Gravatar from %s' %
                (result.status_code, gravatar_url))
    # All failure paths (exception, bad status, non-PNG) end up here.
    return DEFAULT_IDENTICON_DATA_URL
def get_user_settings(user_id, strict=False):
    """Returns the UserSettings for a user id.

    Args:
        user_id: str. The id of the user.
        strict: bool. When True, raises if the user does not exist.

    Returns:
        UserSettings|None. The user's settings, or None when absent and
        strict is False.
    """
    settings = get_users_settings([user_id])[0]
    if settings is None and strict:
        logging.error('Could not find user with id %s' % user_id)
        raise Exception('User not found.')
    return settings
def get_user_settings_by_gae_id(gae_id, strict=False):
    """Returns the UserSettings for an App Engine id.

    Args:
        gae_id: str. The GAE id of the user.
        strict: bool. When True, raises if the user does not exist.

    Returns:
        UserSettings|None. The user's settings, or None when absent and
        strict is False.
    """
    settings = _transform_user_settings(
        user_models.UserSettingsModel.get_by_gae_id(gae_id))
    if settings is None and strict:
        logging.error('Could not find user with id %s' % gae_id)
        raise Exception('User not found.')
    return settings
def get_user_role_from_id(user_id):
    """Returns the user's role id, defaulting to guest for unknown users."""
    settings = get_user_settings(user_id, strict=False)
    return feconf.ROLE_ID_GUEST if settings is None else settings.role
def get_user_community_rights(user_id):
    """Returns the user's community review rights.

    Users without a rights model get an empty-rights object.
    """
    rights_model = user_models.UserCommunityRightsModel.get_by_id(user_id)
    if rights_model is None:
        return user_domain.UserCommunityRights(user_id, [], [], False)
    return user_domain.UserCommunityRights(
        user_id,
        rights_model.can_review_translation_for_language_codes,
        rights_model.can_review_voiceover_for_language_codes,
        rights_model.can_review_questions)
def get_all_community_reviewers():
    """Returns UserCommunityRights objects for every stored reviewer."""
    rights_list = []
    for model in user_models.UserCommunityRightsModel.get_all():
        rights_list.append(user_domain.UserCommunityRights(
            model.id, model.can_review_translation_for_language_codes,
            model.can_review_voiceover_for_language_codes,
            model.can_review_questions))
    return rights_list
def _save_user_community_rights(user_community_rights):
    """Commits a user's community review rights to the datastore.

    Args:
        user_community_rights: UserCommunityRights. The domain object to
            persist; its id doubles as the storage-model id.
    """
    # Fix: the model constructor reference was truncated to
    # 'serCommunityRightsModel(' — restore the full
    # 'user_models.UserCommunityRightsModel' name used by the sibling
    # accessors (get_user_community_rights, remove_community_reviewer).
    user_models.UserCommunityRightsModel(
        id=user_community_rights.id,
        can_review_translation_for_language_codes=(
            user_community_rights.can_review_translation_for_language_codes),
        can_review_voiceover_for_language_codes=(
            user_community_rights.can_review_voiceover_for_language_codes),
        can_review_questions=user_community_rights.can_review_questions).put()
def _update_user_community_rights(user_community_rights):
    """Saves the rights if any remain, else deletes the reviewer entry."""
    if not user_community_rights.can_review_at_least_one_item():
        remove_community_reviewer(user_community_rights.id)
    else:
        _save_user_community_rights(user_community_rights)
def get_usernames_by_role(role):
    """Returns the usernames of all users holding the given role."""
    return [
        settings.username
        for settings in user_models.UserSettingsModel.get_by_role(role)]
def get_user_ids_by_role(role):
    """Returns the ids of all users holding the given role."""
    return [
        settings.id
        for settings in user_models.UserSettingsModel.get_by_role(role)]
class UserActionsInfo(python_utils.OBJECT):
    """Immutable snapshot of a user's id, role and permitted actions."""

    def __init__(self, user_id=None):
        self._user_id = user_id
        # Unknown/None ids resolve to the guest role.
        self._role = get_user_role_from_id(user_id)
        self._actions = role_services.get_all_actions(self._role)

    @property
    def user_id(self):
        """str|None. The id of the user this object describes."""
        return self._user_id

    @property
    def role(self):
        """str. The role id resolved for the user."""
        return self._role

    @property
    def actions(self):
        """list(str). Actions the user's role permits."""
        return self._actions
def get_system_user():
    """Returns a UserActionsInfo for the system committer."""
    return UserActionsInfo(feconf.SYSTEM_COMMITTER_ID)
def _save_user_settings(user_settings):
    """Validates and commits a UserSettings object to the datastore.

    Updates the existing model in place when one exists for the user id;
    otherwise creates a new model.

    Args:
        user_settings: UserSettings. The settings to persist.

    Raises:
        ValidationError. The settings object fails validation.
    """
    user_settings.validate()
    # Field-for-field mirror of the storage model's schema; note that
    # normalized_username is derived here rather than stored on the
    # domain object directly.
    user_settings_dict = {
        'gae_id': user_settings.gae_id,
        'email': user_settings.email,
        'role': user_settings.role,
        'username': user_settings.username,
        'normalized_username': user_settings.normalized_username,
        'last_agreed_to_terms': user_settings.last_agreed_to_terms,
        'last_started_state_editor_tutorial': (
            user_settings.last_started_state_editor_tutorial),
        'last_started_state_translation_tutorial': (
            user_settings.last_started_state_translation_tutorial),
        'last_logged_in': user_settings.last_logged_in,
        'last_edited_an_exploration': user_settings.last_edited_an_exploration,
        'last_created_an_exploration': (
            user_settings.last_created_an_exploration),
        'profile_picture_data_url': user_settings.profile_picture_data_url,
        'default_dashboard': user_settings.default_dashboard,
        'creator_dashboard_display_pref': (
            user_settings.creator_dashboard_display_pref),
        'user_bio': user_settings.user_bio,
        'subject_interests': user_settings.subject_interests,
        'first_contribution_msec': user_settings.first_contribution_msec,
        'preferred_language_codes': user_settings.preferred_language_codes,
        'preferred_site_language_code': (
            user_settings.preferred_site_language_code),
        'preferred_audio_language_code': (
            user_settings.preferred_audio_language_code),
        'deleted': user_settings.deleted
    }
    user_model = user_models.UserSettingsModel.get_by_id(user_settings.user_id)
    if user_model is not None:
        user_model.populate(**user_settings_dict)
        user_model.put()
    else:
        user_settings_dict['id'] = user_settings.user_id
        user_models.UserSettingsModel(**user_settings_dict).put()
def _transform_user_settings(user_settings_model):
    """Converts a UserSettingsModel into a UserSettings domain object.

    Args:
        user_settings_model: UserSettingsModel|None. The storage model.

    Returns:
        UserSettings|None. The domain object, or None when the model is
        None/falsy.
    """
    if user_settings_model:
        return UserSettings(
            user_id=user_settings_model.id,
            gae_id=user_settings_model.gae_id,
            email=user_settings_model.email,
            role=user_settings_model.role,
            username=user_settings_model.username,
            last_agreed_to_terms=user_settings_model.last_agreed_to_terms,
            last_started_state_editor_tutorial=(
                user_settings_model.last_started_state_editor_tutorial),
            last_started_state_translation_tutorial=(
                user_settings_model.last_started_state_translation_tutorial),
            last_logged_in=user_settings_model.last_logged_in,
            last_edited_an_exploration=(
                user_settings_model.last_edited_an_exploration),
            last_created_an_exploration=(
                user_settings_model.last_created_an_exploration),
            profile_picture_data_url=(
                user_settings_model.profile_picture_data_url),
            default_dashboard=user_settings_model.default_dashboard,
            creator_dashboard_display_pref=(
                user_settings_model.creator_dashboard_display_pref),
            user_bio=user_settings_model.user_bio,
            subject_interests=user_settings_model.subject_interests,
            first_contribution_msec=(
                user_settings_model.first_contribution_msec),
            preferred_language_codes=(
                user_settings_model.preferred_language_codes),
            preferred_site_language_code=(
                user_settings_model.preferred_site_language_code),
            preferred_audio_language_code=(
                user_settings_model.preferred_audio_language_code),
            deleted=user_settings_model.deleted
        )
    else:
        return None
def is_user_registered(user_id):
    """Returns whether a settings model exists for the given user id."""
    if user_id is None:
        return False
    return bool(
        user_models.UserSettingsModel.get(user_id, strict=False))
def has_ever_registered(user_id):
    """Returns whether the user has a username and has accepted the terms."""
    settings = get_user_settings(user_id, strict=True)
    return bool(settings.username and settings.last_agreed_to_terms)
def has_fully_registered(user_id):
    """Checks whether the user completed registration against the latest
    terms page.

    Returns a truthy value only when a username exists and the terms were
    agreed to at or after the registration page's last update.
    """
    if user_id is None:
        return False
    settings = get_user_settings(user_id, strict=True)
    return settings.username and settings.last_agreed_to_terms and (
        settings.last_agreed_to_terms >=
        feconf.REGISTRATION_PAGE_LAST_UPDATED_UTC)
def create_new_user(gae_id, email):
    """Creates settings and an empty contributions record for a new user.

    Args:
        gae_id: str. The App Engine id of the new user.
        email: str. The user's email address.

    Returns:
        UserSettings. The freshly created settings object.

    Raises:
        Exception. A user with this gae_id already exists.
    """
    existing = get_user_settings(gae_id, strict=False)
    if existing is not None:
        raise Exception('User %s already exists.' % gae_id)
    new_user_id = user_models.UserSettingsModel.get_new_id('')
    settings = UserSettings(
        new_user_id, gae_id, email, feconf.ROLE_ID_EXPLORATION_EDITOR,
        preferred_language_codes=[constants.DEFAULT_LANGUAGE_CODE])
    _save_user_settings(settings)
    create_user_contributions(new_user_id, [], [])
    return settings
def get_username(user_id):
    """Returns the username for a user id; system users get their fixed
    display names."""
    if user_id in feconf.SYSTEM_USERS:
        return feconf.SYSTEM_USERS[user_id]
    return get_user_settings(user_id, strict=True).username
def get_usernames(user_ids):
    """Returns usernames positionally matching user_ids.

    System user ids resolve to their fixed display names; other ids are
    looked up in one batch. Unknown ids yield None.
    """
    results = [None] * len(user_ids)
    # (original position, user id) pairs still needing a datastore lookup.
    pending = []
    for pos, uid in enumerate(user_ids):
        if uid in feconf.SYSTEM_USERS:
            results[pos] = feconf.SYSTEM_USERS[uid]
        else:
            pending.append((pos, uid))
    settings_list = get_users_settings([uid for _, uid in pending])
    for (pos, _), settings in zip(pending, settings_list):
        if settings:
            results[pos] = settings.username
    return results
def set_username(user_id, new_username):
    """Validates and assigns a new username to the user.

    Raises:
        ValidationError. The username is invalid or already taken.
    """
    settings = get_user_settings(user_id, strict=True)
    UserSettings.require_valid_username(new_username)
    if is_username_taken(new_username):
        raise utils.ValidationError(
            'Sorry, the username \"%s\" is already taken! Please pick '
            'a different one.' % new_username)
    settings.username = new_username
    _save_user_settings(settings)
def record_agreement_to_terms(user_id):
    """Stamps the current UTC time as the user's terms-agreement time."""
    settings = get_user_settings(user_id, strict=True)
    settings.last_agreed_to_terms = datetime.datetime.utcnow()
    _save_user_settings(settings)
def update_profile_picture_data_url(user_id, profile_picture_data_url):
    """Persists a new profile-picture data URL for the user."""
    settings = get_user_settings(user_id, strict=True)
    settings.profile_picture_data_url = profile_picture_data_url
    _save_user_settings(settings)
def update_user_bio(user_id, user_bio):
    """Persists a new biography for the user."""
    settings = get_user_settings(user_id, strict=True)
    settings.user_bio = user_bio
    _save_user_settings(settings)
def update_user_default_dashboard(user_id, default_dashboard):
    """Persists the user's default dashboard choice."""
    settings = get_user_settings(user_id, strict=True)
    settings.default_dashboard = default_dashboard
    _save_user_settings(settings)
def update_user_creator_dashboard_display(
        user_id, creator_dashboard_display_pref):
    """Persists the user's creator-dashboard display preference."""
    settings = get_user_settings(user_id, strict=True)
    settings.creator_dashboard_display_pref = creator_dashboard_display_pref
    _save_user_settings(settings)
def update_subject_interests(user_id, subject_interests):
    """Validates and persists the user's subject interests.

    Each interest must be a non-empty lowercase-alphabetic string, and
    the list must contain no duplicates.

    Raises:
        ValidationError. The list or one of its items is invalid.
    """
    if not isinstance(subject_interests, list):
        raise utils.ValidationError('Expected subject_interests to be a list.')
    for interest in subject_interests:
        if not isinstance(interest, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected each subject interest to be a string.')
        elif not interest:
            raise utils.ValidationError(
                'Expected each subject interest to be non-empty.')
        elif not re.match(constants.TAG_REGEX, interest):
            raise utils.ValidationError(
                'Expected each subject interest to consist only of '
                'lowercase alphabetic characters and spaces.')
    if len(set(subject_interests)) != len(subject_interests):
        raise utils.ValidationError(
            'Expected each subject interest to be distinct.')
    settings = get_user_settings(user_id, strict=True)
    settings.subject_interests = subject_interests
    _save_user_settings(settings)
def _update_first_contribution_msec(user_id, first_contribution_msec):
    """Unconditionally overwrites the user's first-contribution time."""
    settings = get_user_settings(user_id, strict=True)
    settings.first_contribution_msec = first_contribution_msec
    _save_user_settings(settings)
def update_first_contribution_msec_if_not_set(user_id, first_contribution_msec):
    """Records the first-contribution time only if none is stored yet."""
    settings = get_user_settings(user_id, strict=True)
    if settings.first_contribution_msec is None:
        _update_first_contribution_msec(user_id, first_contribution_msec)
def update_preferred_language_codes(user_id, preferred_language_codes):
    """Persists the user's preferred exploration language codes."""
    settings = get_user_settings(user_id, strict=True)
    settings.preferred_language_codes = preferred_language_codes
    _save_user_settings(settings)
def update_preferred_site_language_code(user_id, preferred_site_language_code):
    """Persists the user's preferred site UI language code."""
    settings = get_user_settings(user_id, strict=True)
    settings.preferred_site_language_code = preferred_site_language_code
    _save_user_settings(settings)
def update_preferred_audio_language_code(
        user_id, preferred_audio_language_code):
    """Persists the user's preferred audio language code."""
    settings = get_user_settings(user_id, strict=True)
    settings.preferred_audio_language_code = preferred_audio_language_code
    _save_user_settings(settings)
def update_user_role(user_id, role):
    """Assigns a new role to the user.

    Raises:
        Exception. The role id is not a recognized role.
    """
    if role not in role_services.PARENT_ROLES:
        raise Exception('Role %s does not exist.' % role)
    settings = get_user_settings(user_id, strict=True)
    settings.role = role
    _save_user_settings(settings)
def mark_user_for_deletion(
        user_id, exploration_ids, collection_ids):
    """Flags the user as deleted and files a pending deletion request.

    Args:
        user_id: str. The id of the user to mark.
        exploration_ids: list(str). Explorations to handle during deletion.
        collection_ids: list(str). Collections to handle during deletion.
    """
    settings = get_user_settings(user_id, strict=True)
    settings.deleted = True
    _save_user_settings(settings)
    user_models.PendingDeletionRequestModel(
        id=user_id,
        email=settings.email,
        exploration_ids=exploration_ids,
        collection_ids=collection_ids,
    ).put()
def get_human_readable_user_ids(user_ids):
    """Maps user ids to human-readable display strings.

    The system committer shows as 'admin'; users without a username show a
    truncated-email placeholder.

    Raises:
        Exception. One of the ids does not correspond to a user.
    """
    display_names = []
    for pos, settings in enumerate(get_users_settings(user_ids)):
        if settings is None:
            logging.error('User id %s not known in list of user_ids %s' % (
                user_ids[pos], user_ids))
            raise Exception('User not found.')
        if settings.user_id == feconf.SYSTEM_COMMITTER_ID:
            display_names.append('admin')
        elif settings.username:
            display_names.append(settings.username)
        else:
            display_names.append(
                '[Awaiting user registration: %s]' %
                settings.truncated_email)
    return display_names
def record_user_started_state_editor_tutorial(user_id):
    """Stamps the current UTC time as the editor-tutorial start time."""
    settings = get_user_settings(user_id, strict=True)
    settings.last_started_state_editor_tutorial = datetime.datetime.utcnow()
    _save_user_settings(settings)
def record_user_started_state_translation_tutorial(user_id):
    """Stamps the current UTC time as the translation-tutorial start time."""
    settings = get_user_settings(user_id, strict=True)
    settings.last_started_state_translation_tutorial = (
        datetime.datetime.utcnow())
    _save_user_settings(settings)
def record_user_logged_in(user_id):
    """Stamps the current UTC time as the user's last login time."""
    settings = get_user_settings(user_id, strict=True)
    settings.last_logged_in = datetime.datetime.utcnow()
    _save_user_settings(settings)
def update_last_logged_in(user_settings, new_last_logged_in):
    """Sets an explicit last-login time on the settings and saves them."""
    user_settings.last_logged_in = new_last_logged_in
    _save_user_settings(user_settings)
def record_user_edited_an_exploration(user_id):
    """Stamps the current UTC time as the user's last exploration edit.

    Silently does nothing for unknown users.
    """
    settings = get_user_settings(user_id)
    if settings:
        settings.last_edited_an_exploration = datetime.datetime.utcnow()
        _save_user_settings(settings)
def record_user_created_an_exploration(user_id):
    """Stamps the current UTC time as the user's last exploration creation.

    Silently does nothing for unknown users.
    """
    settings = get_user_settings(user_id)
    if settings:
        settings.last_created_an_exploration = datetime.datetime.utcnow()
        _save_user_settings(settings)
def update_email_preferences(
        user_id, can_receive_email_updates, can_receive_editor_role_email,
        can_receive_feedback_email, can_receive_subscription_email):
    """Stores the user's global email notification preferences.

    Creates the preferences model on first use.
    """
    prefs_model = user_models.UserEmailPreferencesModel.get(
        user_id, strict=False)
    if prefs_model is None:
        prefs_model = user_models.UserEmailPreferencesModel(id=user_id)
    prefs_model.site_updates = can_receive_email_updates
    prefs_model.editor_role_notifications = can_receive_editor_role_email
    prefs_model.feedback_message_notifications = can_receive_feedback_email
    prefs_model.subscription_notifications = can_receive_subscription_email
    prefs_model.put()
def get_email_preferences(user_id):
    """Returns the user's global email preferences, defaulting when unset."""
    prefs_model = user_models.UserEmailPreferencesModel.get(
        user_id, strict=False)
    if prefs_model is None:
        return user_domain.UserGlobalPrefs.create_default_prefs()
    return user_domain.UserGlobalPrefs(
        prefs_model.site_updates,
        prefs_model.editor_role_notifications,
        prefs_model.feedback_message_notifications,
        prefs_model.subscription_notifications)
def flush_migration_bot_contributions_model():
    """Clears the migration bot's recorded created/edited explorations."""
    contributions = get_user_contributions(
        feconf.MIGRATION_BOT_USER_ID, strict=False)
    if contributions is not None:
        contributions.edited_exploration_ids = []
        contributions.created_exploration_ids = []
        _save_user_contributions(contributions)
def get_users_email_preferences(user_ids):
    """Returns global email preferences for each user id, in order.

    Users without a stored model get the default preferences.
    """
    prefs_models = user_models.UserEmailPreferencesModel.get_multi(user_ids)
    prefs_list = []
    for prefs_model in prefs_models:
        if prefs_model is None:
            prefs_list.append(
                user_domain.UserGlobalPrefs.create_default_prefs())
        else:
            prefs_list.append(user_domain.UserGlobalPrefs(
                prefs_model.site_updates,
                prefs_model.editor_role_notifications,
                prefs_model.feedback_message_notifications,
                prefs_model.subscription_notifications))
    return prefs_list
def set_email_preferences_for_exploration(
        user_id, exploration_id, mute_feedback_notifications=None,
        mute_suggestion_notifications=None):
    """Updates the user's mute flags for a particular exploration.

    A flag left as None is not modified; the per-exploration model is
    created on first use.
    """
    exp_user_model = user_models.ExplorationUserDataModel.get(
        user_id, exploration_id)
    if exp_user_model is None:
        exp_user_model = user_models.ExplorationUserDataModel.create(
            user_id, exploration_id)
    if mute_feedback_notifications is not None:
        exp_user_model.mute_feedback_notifications = (
            mute_feedback_notifications)
    if mute_suggestion_notifications is not None:
        exp_user_model.mute_suggestion_notifications = (
            mute_suggestion_notifications)
    exp_user_model.put()
def get_email_preferences_for_exploration(user_id, exploration_id):
    """Returns the user's email preferences for one exploration,
    defaulting when no per-exploration model exists."""
    exp_user_model = user_models.ExplorationUserDataModel.get(
        user_id, exploration_id)
    if exp_user_model is None:
        return user_domain.UserExplorationPrefs.create_default_prefs()
    return user_domain.UserExplorationPrefs(
        exp_user_model.mute_feedback_notifications,
        exp_user_model.mute_suggestion_notifications)
def get_users_email_preferences_for_exploration(user_ids, exploration_id):
    """Returns per-exploration email preferences for each user id, in
    order, defaulting for users without a stored model."""
    exp_user_models = user_models.ExplorationUserDataModel.get_multi(
        user_ids, exploration_id)
    prefs_list = []
    for exp_user_model in exp_user_models:
        if exp_user_model is None:
            prefs_list.append(
                user_domain.UserExplorationPrefs.create_default_prefs())
        else:
            prefs_list.append(user_domain.UserExplorationPrefs(
                exp_user_model.mute_feedback_notifications,
                exp_user_model.mute_suggestion_notifications))
    return prefs_list
class UserContributions(python_utils.OBJECT):
    """Value object listing the explorations a user created and edited."""

    def __init__(
            self, user_id, created_exploration_ids, edited_exploration_ids):
        """Constructs a UserContributions domain object.

        Args:
            user_id: str. The unique id of the user.
            created_exploration_ids: list(str). Ids of created explorations.
            edited_exploration_ids: list(str). Ids of edited explorations.
        """
        self.user_id = user_id
        self.created_exploration_ids = created_exploration_ids
        self.edited_exploration_ids = edited_exploration_ids

    def validate(self):
        """Checks the user id and both exploration-id lists.

        Raises:
            ValidationError. An attribute has the wrong type, or the user
                id is empty.
        """
        if not isinstance(self.user_id, python_utils.BASESTRING):
            raise utils.ValidationError(
                'Expected user_id to be a string, received %s' % self.user_id)
        if not self.user_id:
            raise utils.ValidationError('No user id specified.')
        if not isinstance(self.created_exploration_ids, list):
            raise utils.ValidationError(
                'Expected created_exploration_ids to be a list, received %s'
                % self.created_exploration_ids)
        for exploration_id in self.created_exploration_ids:
            if not isinstance(exploration_id, python_utils.BASESTRING):
                raise utils.ValidationError(
                    'Expected exploration_id in created_exploration_ids '
                    'to be a string, received %s' % (
                        exploration_id))
        if not isinstance(self.edited_exploration_ids, list):
            raise utils.ValidationError(
                'Expected edited_exploration_ids to be a list, received %s'
                % self.edited_exploration_ids)
        for exploration_id in self.edited_exploration_ids:
            if not isinstance(exploration_id, python_utils.BASESTRING):
                raise utils.ValidationError(
                    'Expected exploration_id in edited_exploration_ids '
                    'to be a string, received %s' % (
                        exploration_id))
def get_user_contributions(user_id, strict=False):
    """Returns the user's contributions record, or None when absent.

    Args:
        user_id: str. The id of the user.
        strict: bool. Passed through to the model getter; when True the
            underlying lookup raises for a missing model.

    Returns:
        UserContributions|None. The domain object, or None.
    """
    model = user_models.UserContributionsModel.get(user_id, strict=strict)
    if model is None:
        return None
    return UserContributions(
        model.id, model.created_exploration_ids,
        model.edited_exploration_ids)
def create_user_contributions(
        user_id, created_exploration_ids, edited_exploration_ids):
    """Creates and saves a fresh contributions record for the user.

    Raises:
        Exception. A contributions model already exists for the user.
    """
    if get_user_contributions(user_id, strict=False):
        raise Exception(
            'User contributions model for user %s already exists.' % user_id)
    contributions = UserContributions(
        user_id, created_exploration_ids, edited_exploration_ids)
    _save_user_contributions(contributions)
    return contributions
def update_user_contributions(
        user_id, created_exploration_ids, edited_exploration_ids):
    """Replaces both exploration-id lists on an existing record.

    Raises:
        Exception. No contributions model exists for the user.
    """
    contributions = get_user_contributions(user_id, strict=False)
    if not contributions:
        raise Exception(
            'User contributions model for user %s does not exist.' % user_id)
    contributions.created_exploration_ids = created_exploration_ids
    contributions.edited_exploration_ids = edited_exploration_ids
    _save_user_contributions(contributions)
def add_created_exploration_id(user_id, exploration_id):
    """Records that the user created an exploration.

    Creates the contributions record if needed; keeps the id list sorted
    and duplicate-free.
    """
    contributions = get_user_contributions(user_id, strict=False)
    if not contributions:
        create_user_contributions(user_id, [exploration_id], [])
    elif exploration_id not in contributions.created_exploration_ids:
        contributions.created_exploration_ids.append(exploration_id)
        contributions.created_exploration_ids.sort()
        _save_user_contributions(contributions)
def add_edited_exploration_id(user_id, exploration_id):
    """Records that the user edited an exploration.

    Creates the contributions record if needed; keeps the id list sorted
    and duplicate-free.
    """
    contributions = get_user_contributions(user_id, strict=False)
    if not contributions:
        create_user_contributions(user_id, [], [exploration_id])
    elif exploration_id not in contributions.edited_exploration_ids:
        contributions.edited_exploration_ids.append(exploration_id)
        contributions.edited_exploration_ids.sort()
        _save_user_contributions(contributions)
def _save_user_contributions(user_contributions):
    """Validates and commits a UserContributions object to the datastore."""
    user_contributions.validate()
    model = user_models.UserContributionsModel(
        id=user_contributions.user_id,
        created_exploration_ids=user_contributions.created_exploration_ids,
        edited_exploration_ids=user_contributions.edited_exploration_ids,
    )
    model.put()
def _migrate_dashboard_stats_to_latest_schema(versioned_dashboard_stats):
    """Rejects dashboard-stats payloads outside the supported version range.

    Raises:
        Exception. The schema version is below 1 or above the current
            supported version.
    """
    schema_version = versioned_dashboard_stats.schema_version
    if (schema_version < 1 or
            schema_version > feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION):
        raise Exception(
            'Sorry, we can only process v1-v%d dashboard stats schemas at '
            'present.' % feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION)
def get_current_date_as_string():
    """Returns the current UTC time formatted as a dashboard-stats key."""
    now = datetime.datetime.utcnow()
    return now.strftime(feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT)
def parse_date_from_string(datetime_str):
    """Parses a dashboard-stats date string into year/month/day parts.

    Returns:
        dict. Keys 'year', 'month' and 'day' mapped to ints.
    """
    parsed = datetime.datetime.strptime(
        datetime_str, feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT)
    return {'year': parsed.year, 'month': parsed.month, 'day': parsed.day}
def get_user_impact_score(user_id):
    """Returns the user's impact score, or 0 when no stats model exists."""
    stats_model = user_models.UserStatsModel.get(user_id, strict=False)
    return stats_model.impact_score if stats_model else 0
def get_weekly_dashboard_stats(user_id):
    """Returns the user's weekly creator stats list, or None when empty."""
    stats_model = user_models.UserStatsModel.get(user_id, strict=False)
    if stats_model and stats_model.weekly_creator_stats_list:
        return stats_model.weekly_creator_stats_list
    return None
def get_last_week_dashboard_stats(user_id):
    """Returns the most recent weekly stats entry, or None when absent."""
    weekly_stats = get_weekly_dashboard_stats(user_id)
    return weekly_stats[-1] if weekly_stats else None
def update_dashboard_stats_log(user_id):
    """Appends a dated snapshot of the user's stats to their weekly log.

    Migrates the stats model first if its schema version is stale.
    """
    stats_model = user_models.UserStatsModel.get_or_create(user_id)
    if stats_model.schema_version != (
            feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION):
        _migrate_dashboard_stats_to_latest_schema(stats_model)
    snapshot = {
        get_current_date_as_string(): {
            'num_ratings': stats_model.num_ratings or 0,
            'average_ratings': stats_model.average_ratings,
            'total_plays': stats_model.total_plays or 0
        }
    }
    stats_model.weekly_creator_stats_list.append(snapshot)
    stats_model.put()
def is_at_least_moderator(user_id):
    """Returns whether the user's role is moderator or admin."""
    return get_user_role_from_id(user_id) in (
        feconf.ROLE_ID_MODERATOR, feconf.ROLE_ID_ADMIN)
def is_admin(user_id):
    """Returns whether the user's role is admin."""
    return get_user_role_from_id(user_id) == feconf.ROLE_ID_ADMIN
def is_topic_manager(user_id):
    """Returns whether the user's role is topic manager."""
    return get_user_role_from_id(user_id) == feconf.ROLE_ID_TOPIC_MANAGER
def can_review_translation_suggestions(user_id, language_code=None):
    """Checks whether the user may review translation suggestions.

    With a language_code, checks that specific language; otherwise checks
    whether the user can review translations in any language.
    """
    rights = get_user_community_rights(user_id)
    codes = rights.can_review_translation_for_language_codes
    if language_code is None:
        return bool(codes)
    return language_code in codes
def can_review_voiceover_applications(user_id, language_code=None):
    """Checks whether the user may review voiceover applications.

    With a language_code, checks that specific language; otherwise checks
    whether the user can review voiceovers in any language.
    """
    rights = get_user_community_rights(user_id)
    codes = rights.can_review_voiceover_for_language_codes
    if language_code is None:
        return bool(codes)
    return language_code in codes
def can_review_question_suggestions(user_id):
    """Returns whether the user may review question suggestions."""
    return get_user_community_rights(user_id).can_review_questions
def allow_user_to_review_translation_in_language(user_id, language_code):
    """Grants translation review rights for one language (idempotent)."""
    rights = get_user_community_rights(user_id)
    codes = set(rights.can_review_translation_for_language_codes)
    codes.add(language_code)
    rights.can_review_translation_for_language_codes = sorted(codes)
    _save_user_community_rights(rights)
def remove_translation_review_rights_in_language(user_id, language_code):
    """Revokes translation review rights for one language.

    Raises ValueError (via list.remove) if the right was never granted.
    """
    rights = get_user_community_rights(user_id)
    rights.can_review_translation_for_language_codes.remove(language_code)
    _update_user_community_rights(rights)
def allow_user_to_review_voiceover_in_language(user_id, language_code):
    """Grants voiceover review rights for one language (idempotent)."""
    rights = get_user_community_rights(user_id)
    codes = set(rights.can_review_voiceover_for_language_codes)
    codes.add(language_code)
    rights.can_review_voiceover_for_language_codes = sorted(codes)
    _save_user_community_rights(rights)
def remove_voiceover_review_rights_in_language(user_id, language_code):
    """Revokes the user's right to review voiceovers in the given
    language.
    """
    rights = get_user_community_rights(user_id)
    rights.can_review_voiceover_for_language_codes.remove(language_code)
    _update_user_community_rights(rights)
def allow_user_to_review_question(user_id):
    """Grants the user the right to review question suggestions."""
    rights = get_user_community_rights(user_id)
    rights.can_review_questions = True
    _save_user_community_rights(rights)
def remove_question_review_rights(user_id):
    """Revokes the user's right to review question suggestions."""
    rights = get_user_community_rights(user_id)
    rights.can_review_questions = False
    _update_user_community_rights(rights)
def remove_community_reviewer(user_id):
    """Deletes the user's UserCommunityRightsModel, if one exists."""
    model = user_models.UserCommunityRightsModel.get_by_id(user_id)
    if model is not None:
        model.delete()
def get_community_reviewer_usernames(review_category, language_code=None):
    """Returns usernames of the reviewers for the given review category.

    The question category is language-agnostic, so supplying a language
    code for it raises an Exception; an unknown category also raises.
    """
    model_cls = user_models.UserCommunityRightsModel
    if review_category == constants.REVIEW_CATEGORY_TRANSLATION:
        reviewer_ids = model_cls.get_translation_reviewer_user_ids(
            language_code)
    elif review_category == constants.REVIEW_CATEGORY_VOICEOVER:
        reviewer_ids = model_cls.get_voiceover_reviewer_user_ids(
            language_code)
    elif review_category == constants.REVIEW_CATEGORY_QUESTION:
        if language_code is not None:
            raise Exception('Expected language_code to be None, found: %s' % (
                language_code))
        reviewer_ids = model_cls.get_question_reviewer_user_ids()
    else:
        raise Exception('Invalid review category: %s' % review_category)
    return get_usernames(reviewer_ids)
def log_username_change(committer_id, old_username, new_username):
    """Records a username change in the audit log."""
    timestamp = utils.get_current_time_in_millisecs()
    audit_models.UsernameChangeAuditModel(
        id='%s.%d' % (committer_id, timestamp),
        committer_id=committer_id,
        old_username=old_username,
        new_username=new_username).put()
| true | true |
f7142608165d85e92bb7ae364ff06ed93053dee8 | 7,827 | py | Python | python/scripts/traj_gen/chomp_trajectory.py | tomcattigerkkk/traj_gen | d01882c17d8e979860fb1f09defa968a86adb494 | [
"MIT"
] | null | null | null | python/scripts/traj_gen/chomp_trajectory.py | tomcattigerkkk/traj_gen | d01882c17d8e979860fb1f09defa968a86adb494 | [
"MIT"
] | null | null | null | python/scripts/traj_gen/chomp_trajectory.py | tomcattigerkkk/traj_gen | d01882c17d8e979860fb1f09defa968a86adb494 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
from .traj_gen_base import TrajGen
import numpy as np
import casadi as ca
from scipy.interpolate import interp1d
class CHOMPTrajGen(TrajGen):
    """CHOMP-style trajectory generator on a dense waypoint grid.

    The trajectory over [t0, tf] is discretized into ``num_variables``
    uniformly spaced waypoints per dimension (``pntDensity_`` samples per
    second).  The objective penalizes finite-difference derivatives of the
    waypoint sequence and each dimension is solved independently as a QP
    with ipopt (via casadi).
    """
    def __init__(self, knots_, dim_, pntDensity_):
        super().__init__(knots_, dim_)
        self.pntDensity = pntDensity_
        assert knots_.shape[0]==2, 'For optimalTraj, knots = [t0, tf]'
        self.num_variables = int(np.floor((knots_[-1]-knots_[0])*pntDensity_))
        self.dt = (knots_[-1]-knots_[0])/(self.num_variables-1)
        # Sample times of the decision variables (different from self.Ts).
        self.ts = np.linspace(knots_[0], knots_[-1], self.num_variables)
        self.Xs = np.zeros((self.dim, self.num_variables))

    def findStepIndex(self, t):
        """
        Returns the index of the sample time closest to t.
        """
        time_diff = (self.ts-t)**2
        return np.where(time_diff==np.min(time_diff))[0][0]

    def setDerivativeObj(self, weight_mask):
        # weight_mask[d-1] is the penalty weight on the d-th derivative.
        self.weight_mask = weight_mask

    def addPin(self, pin_):
        """
        Registers a pin.  A 2-D pin value is a loose (interval) pin, a
        1-D value is a fix (equality) pin.
        """
        if pin_['d'] >= self.num_variables:
            # BUGFIX: the message says the pin is ignored, but the old code
            # still added it, later producing negative-sized constraint
            # arrays.  Actually skip it now.
            print("Warning: The degree of the pin exceed the total number of variables. This pin ignored\n")
            return
        super().addPin(pin_)
        X_ = pin_['X']
        m = 0
        if len(X_.shape) == 2: # 2 dimension ==> loose pin
            if m in self.loosePinSet.keys():
                self.loosePinSet[m].append(pin_)
            else:
                self.loosePinSet[m] = [pin_]
        elif len(X_.shape) == 1: # vector ==> fix pin
            if m in self.fixPinSet.keys():
                self.fixPinSet[m].append(pin_)
            else:
                self.fixPinSet[m] = [pin_]
        else:
            print("Warning: Dim of pin value is invalid\n")

    def getDiffMat(self, d_):
        """
        Builds the d_-th order finite-difference matrix, mapping the
        waypoints to their d_-th numerical derivative (shape (N-d_, N)).
        """
        if d_ == 0:
            mat_ = np.diag(np.ones(self.num_variables))
        else:
            mat_ = np.diag(np.ones(self.num_variables))
            for j in range(1, d_+1):
                D_ = np.zeros((self.num_variables-j, self.num_variables-j+1))
                for i in range(self.num_variables-j):
                    D_[i, i:i+2] = np.array([-1, 1])
                D_ = D_/self.dt
                mat_ = np.dot(D_, mat_)
        return mat_

    def loosePin2InequalityMat(self,):
        """
        Converts loose pins into stacked inequality constraints A x <= b
        per dimension.  Returns (None, None) when there are no loose pins.
        """
        ASet = None
        BSet = None
        if len(self.loosePinSet.keys()) == 0:
            return ASet, BSet
        for pin in self.loosePinSet[0]:
            a_set_ = []
            b_set_ = []
            for dd in range(self.dim):
                n_ = np.min([self.findStepIndex(pin['t']), self.num_variables-pin['d']-1])
                # Two rows encode lower and upper bound of the interval pin.
                a_ = np.zeros((2, self.num_variables-pin['d']))
                a_[:, n_] = np.array([1, -1])
                a_ = np.dot(a_, self.getDiffMat(pin['d']))
                a_set_.append(a_)
                b_ = np.array([pin['X'][dd, 1], -pin['X'][dd, 0]]).reshape(-1, 1)
                b_set_.append(b_)
            if ASet is None:
                ASet = np.array(a_set_)
                BSet = np.array(b_set_).reshape(self.dim, -1, 1)
            else:
                ASet = np.concatenate((ASet, np.array(a_set_)), axis=1)
                BSet = np.concatenate((BSet, np.array(b_set_).reshape(self.dim, -1, 1)), axis=1)
            print('Bset final in {}'.format(BSet.shape))
        return ASet, BSet

    def fixPin2EqualityMat(self,):
        """
        Converts fix pins into stacked equality constraints A x = b per
        dimension.  Returns (None, None) when there are no fix pins.
        """
        AeqSet = None
        BeqSet = None
        if len(self.fixPinSet.keys())==0:
            return AeqSet, BeqSet
        for pin in self.fixPinSet[0]:
            aeq_set_ = []
            beq_set_ = []
            for dd in range(self.dim):
                n_ = np.min([self.findStepIndex(pin['t']), self.num_variables-pin['d']-1])
                a_ = np.zeros(self.num_variables-pin['d'])
                a_[n_] = 1.0
                a_ = np.dot(a_, self.getDiffMat(pin['d']))
                aeq_set_.append(a_)
                b_ = pin['X'][dd]
                beq_set_.append(b_)
            if AeqSet is None:
                AeqSet = np.array(aeq_set_).reshape(self.dim, 1, -1)
                BeqSet = np.array(beq_set_).reshape(self.dim, 1, -1)
            else:
                AeqSet = np.concatenate((AeqSet, np.array(aeq_set_).reshape(self.dim, 1, -1)), axis=1)
                BeqSet = np.concatenate((BeqSet, np.array(beq_set_).reshape(self.dim, 1, -1)), axis=1)
        return AeqSet, BeqSet

    def getQPset(self,):
        """
        Assembles the per-dimension QP: objective matrices plus the
        inequality (loose pin) and equality (fix pin) constraint sets.
        """
        # 1. objective: weighted sum of squared finite-difference derivatives
        QSet = np.zeros((self.dim, self.num_variables, self.num_variables))
        for dd in range(self.dim):
            Q_ = np.zeros((self.num_variables, self.num_variables))
            for d in range(1, self.weight_mask.shape[0]+1):
                if self.weight_mask[d-1]>0:
                    temp_ = self.getDiffMat(d)
                    Qd_ = np.dot(temp_.T, temp_)
                    Q_ = Q_ + self.weight_mask[d-1]*Qd_
            QSet[dd] = Q_
        # 2. constraints
        ASet, BSet = self.loosePin2InequalityMat()
        AeqSet, BeqSet = self.fixPin2EqualityMat()
        return QSet, ASet, BSet, AeqSet, BeqSet

    def solve(self,):
        """
        Solves one QP per dimension with ipopt.  Returns True on success
        and stores the waypoints in self.Xs.
        """
        self.isSolved = True
        # prepare QP
        QSet, ASet, BSet, AeqSet, BeqSet = self.getQPset()
        # BUGFIX: the begin/end conditions are *fix* pins (AeqSet); the old
        # guard tested ASet (loose pins), which made the loose-pin-free case
        # below unreachable and let a missing-fix-pin setup crash later at
        # the np.concatenate((ASet[dd], AeqSet[dd])) call.
        if AeqSet is None:
            print("Please define the beginning and also the end pins")
            return False
        for dd in range(self.dim):
            print('solving {}-th dimension.. \n'.format(dd))
            x_sym = ca.SX.sym('x', QSet[0].shape[0])
            opts_setting = {'ipopt.max_iter':100, 'ipopt.print_level':0, 'print_time':0, 'ipopt.acceptable_tol':1e-8, 'ipopt.acceptable_obj_change_tol':1e-6}
            obj = ca.mtimes([x_sym.T, QSet[dd], x_sym])
            if ASet is None:
                a_set = AeqSet[dd].copy()
            else:
                a_set = np.concatenate((ASet[dd], AeqSet[dd]))
            Ax_sym = ca.mtimes([a_set, x_sym])
            if BSet is None:
                b_set_u = BeqSet[dd]
                b_set_l = BeqSet[dd]
            else:
                b_set_u = np.concatenate((BSet[dd], BeqSet[dd]), axis=0) # Ax <= b_set_u
                b_set_l = np.concatenate((-np.inf*np.ones(BSet[dd].shape), BeqSet[dd]), axis=0) # Ax >= b_set_l
            nlp_prob = {'f': obj, 'x': x_sym, 'g':Ax_sym}
            solver = ca.nlpsol('solver', 'ipopt', nlp_prob, opts_setting)
            try:
                result = solver(lbg=b_set_l, ubg=b_set_u,)
                Phat_ = result['x']
                flag_ = True
            except Exception:
                # Narrowed from a bare except: only swallow solver failures,
                # never SystemExit/KeyboardInterrupt.
                Phat_ = None
                flag_ = False
            if flag_:
                self.Xs[dd] = Phat_.full().flatten()
            else:
                self.isSolved = False
                print("Failure ..")
                return False
        return True

    def eval(self, t_, d_):
        """
        Evaluates the d_-th derivative of the solved trajectory at the
        query times t_ via linear interpolation between waypoints; query
        times past the last sample are clamped to it.
        """
        val_ = np.zeros((self.dim, t_.shape[0]))
        for dd in range(self.dim):
            # The interpolant only depends on the dimension, not on the
            # query index, so build it once per dimension (the old code
            # rebuilt it inside the inner loop).
            Xsd_ = np.dot(self.getDiffMat(d_), self.Xs[dd].T)
            if d_ >0:
                t_v_ = self.ts[:-d_]
            else:
                t_v_ = self.ts
            set_interp = interp1d(t_v_, Xsd_, kind='linear')
            for idx in range(t_.shape[0]):
                t_i = t_[idx]
                if t_i < self.Ts[0] or t_i > self.Ts[-1]:
                    print("WARNING: Eval of t: out of bound. Extrapolation\n")
                if t_[idx] <= t_v_[-1]:
                    val_[dd, idx] = set_interp(t_[idx])
                else:
                    val_[dd, idx] = set_interp(t_v_[-1])
        return val_
| 39.530303 | 157 | 0.505302 |
from .traj_gen_base import TrajGen
import numpy as np
import casadi as ca
from scipy.interpolate import interp1d
class CHOMPTrajGen(TrajGen):
    """CHOMP-style trajectory generator on a dense waypoint grid.

    The trajectory over [t0, tf] is discretized into num_variables
    waypoints per dimension; each dimension is solved as a QP with ipopt.
    """
    def __init__(self, knots_, dim_, pntDensity_):
        """Initializes the waypoint grid from the two end knots."""
        super().__init__(knots_, dim_)
        self.pntDensity = pntDensity_
        assert knots_.shape[0]==2, 'For optimalTraj, knots = [t0, tf]'
        self.num_variables = int(np.floor((knots_[-1]-knots_[0])*pntDensity_))
        self.dt = (knots_[-1]-knots_[0])/(self.num_variables-1)
        # Sample times of the decision variables (distinct from self.Ts).
        self.ts = np.linspace(knots_[0], knots_[-1], self.num_variables)
        self.Xs = np.zeros((self.dim, self.num_variables))
    def findStepIndex(self, t):
        """Returns the index of the sample time closest to t."""
        time_diff = (self.ts-t)**2
        return np.where(time_diff==np.min(time_diff))[0][0]
    def setDerivativeObj(self, weight_mask):
        # weight_mask[d-1] is the penalty weight on the d-th derivative.
        self.weight_mask = weight_mask
    def addPin(self, pin_):
        """Registers a pin: 2-D value => loose pin, 1-D value => fix pin."""
        if pin_['d'] >= self.num_variables:
            # NOTE(review): the message says the pin is ignored, but the
            # code below still adds it — likely a missing `return`.
            print("Warning: The degree of the pin exceed the total number of variables. This pin ignored\n")
        super().addPin(pin_)
        X_ = pin_['X']
        m = 0
        if len(X_.shape) == 2:
            if m in self.loosePinSet.keys():
                self.loosePinSet[m].append(pin_)
            else:
                self.loosePinSet[m] = [pin_]
        elif len(X_.shape) == 1:
            if m in self.fixPinSet.keys():
                self.fixPinSet[m].append(pin_)
            else:
                self.fixPinSet[m] = [pin_]
        else:
            print("Warning: Dim of pin value is invalid\n")
    def getDiffMat(self, d_):
        """Builds the d_-th order finite-difference matrix (shape (N-d_, N))."""
        if d_ == 0:
            mat_ = np.diag(np.ones(self.num_variables))
        else:
            mat_ = np.diag(np.ones(self.num_variables))
            for j in range(1, d_+1):
                D_ = np.zeros((self.num_variables-j, self.num_variables-j+1))
                for i in range(self.num_variables-j):
                    D_[i, i:i+2] = np.array([-1, 1])
                D_ = D_/self.dt
                mat_ = np.dot(D_, mat_)
        return mat_
    def loosePin2InequalityMat(self,):
        """Converts loose pins into inequality constraints A x <= b per dim."""
        ASet = None
        BSet = None
        if len(self.loosePinSet.keys()) == 0:
            return ASet, BSet
        for pin in self.loosePinSet[0]:
            a_set_ = []
            b_set_ = []
            for dd in range(self.dim):
                n_ = np.min([self.findStepIndex(pin['t']), self.num_variables-pin['d']-1])
                # Two rows encode the lower and upper bound of the interval.
                a_ = np.zeros((2, self.num_variables-pin['d']))
                a_[:, n_] = np.array([1, -1])
                a_ = np.dot(a_, self.getDiffMat(pin['d']))
                a_set_.append(a_)
                b_ = np.array([pin['X'][dd, 1], -pin['X'][dd, 0]]).reshape(-1, 1)
                b_set_.append(b_)
            if ASet is None:
                ASet = np.array(a_set_)
                BSet = np.array(b_set_).reshape(self.dim, -1, 1)
            else:
                ASet = np.concatenate((ASet, np.array(a_set_)), axis=1)
                BSet = np.concatenate((BSet, np.array(b_set_).reshape(self.dim, -1, 1)), axis=1)
            print('Bset final in {}'.format(BSet.shape))
        return ASet, BSet
    def fixPin2EqualityMat(self,):
        """Converts fix pins into equality constraints A x = b per dim."""
        AeqSet = None
        BeqSet = None
        if len(self.fixPinSet.keys())==0:
            return AeqSet, BeqSet
        for pin in self.fixPinSet[0]:
            aeq_set_ = []
            beq_set_ = []
            for dd in range(self.dim):
                n_ = np.min([self.findStepIndex(pin['t']), self.num_variables-pin['d']-1])
                a_ = np.zeros(self.num_variables-pin['d'])
                a_[n_] = 1.0
                a_ = np.dot(a_, self.getDiffMat(pin['d']))
                aeq_set_.append(a_)
                b_ = pin['X'][dd]
                beq_set_.append(b_)
            if AeqSet is None:
                AeqSet = np.array(aeq_set_).reshape(self.dim, 1, -1)
                BeqSet = np.array(beq_set_).reshape(self.dim, 1, -1)
            else:
                AeqSet = np.concatenate((AeqSet, np.array(aeq_set_).reshape(self.dim, 1, -1)), axis=1)
                BeqSet = np.concatenate((BeqSet, np.array(beq_set_).reshape(self.dim, 1, -1)), axis=1)
        return AeqSet, BeqSet
    def getQPset(self,):
        """Assembles per-dimension objective matrices and constraint sets."""
        # Objective: weighted sum of squared finite-difference derivatives.
        QSet = np.zeros((self.dim, self.num_variables, self.num_variables))
        for dd in range(self.dim):
            Q_ = np.zeros((self.num_variables, self.num_variables))
            for d in range(1, self.weight_mask.shape[0]+1):
                if self.weight_mask[d-1]>0:
                    temp_ = self.getDiffMat(d)
                    Qd_ = np.dot(temp_.T, temp_)
                    Q_ = Q_ + self.weight_mask[d-1]*Qd_
            QSet[dd] = Q_
        ASet, BSet = self.loosePin2InequalityMat()
        AeqSet, BeqSet = self.fixPin2EqualityMat()
        return QSet, ASet, BSet, AeqSet, BeqSet
    def solve(self,):
        """Solves one QP per dimension; returns True on success."""
        self.isSolved = True
        QSet, ASet, BSet, AeqSet, BeqSet = self.getQPset()
        # NOTE(review): this guard tests ASet (loose pins), yet the message
        # refers to begin/end pins (fix pins, AeqSet), and it makes the
        # `if ASet is None` branch inside the loop unreachable — verify.
        if ASet is None:
            print("Please define the beginning and also the end pins")
            return False
        for dd in range(self.dim):
            print('solving {}-th dimension.. \n'.format(dd))
            x_sym = ca.SX.sym('x', QSet[0].shape[0])
            opts_setting = {'ipopt.max_iter':100, 'ipopt.print_level':0, 'print_time':0, 'ipopt.acceptable_tol':1e-8, 'ipopt.acceptable_obj_change_tol':1e-6}
            obj = ca.mtimes([x_sym.T, QSet[dd], x_sym])
            if ASet is None:
                a_set = AeqSet[dd].copy()
            else:
                a_set = np.concatenate((ASet[dd], AeqSet[dd]))
            Ax_sym = ca.mtimes([a_set, x_sym])
            if BSet is None:
                b_set_u = BeqSet[dd]
                b_set_l = BeqSet[dd]
            else:
                # Inequalities: b_set_l <= A x <= b_set_u (loose rows have
                # -inf lower bounds; equality rows have equal bounds).
                b_set_u = np.concatenate((BSet[dd], BeqSet[dd]), axis=0)
                b_set_l = np.concatenate((-np.inf*np.ones(BSet[dd].shape), BeqSet[dd]), axis=0)
            nlp_prob = {'f': obj, 'x': x_sym, 'g':Ax_sym}
            solver = ca.nlpsol('solver', 'ipopt', nlp_prob, opts_setting)
            try:
                result = solver(lbg=b_set_l, ubg=b_set_u,)
                Phat_ = result['x']
                flag_ = True
            except:
                # NOTE(review): bare except also masks KeyboardInterrupt.
                Phat_ = None
                flag_ = False
            if flag_:
                self.Xs[dd] = Phat_.full().flatten()
            else:
                self.isSolved = False
                print("Failure ..")
                return False
        return True
    def eval(self, t_, d_):
        """Evaluates the d_-th derivative at query times t_ (linear interp;
        queries past the last sample are clamped to it)."""
        val_ = np.zeros((self.dim, t_.shape[0]))
        for dd in range(self.dim):
            for idx in range(t_.shape[0]):
                t_i = t_[idx]
                if t_i < self.Ts[0] or t_i > self.Ts[-1]:
                    print("WARNING: Eval of t: out of bound. Extrapolation\n")
                # NOTE(review): the interpolant is rebuilt per query index
                # although it only depends on dd — could be hoisted.
                Xsd_ = np.dot(self.getDiffMat(d_), self.Xs[dd].T)
                if d_ >0:
                    t_v_ = self.ts[:-d_]
                else:
                    t_v_ = self.ts
                set_interp = interp1d(t_v_, Xsd_, kind='linear')
                if t_[idx] <= t_v_[-1]:
                    val_[dd, idx] = set_interp(t_[idx])
                else:
                    val_[dd, idx] = set_interp(t_v_[-1])
        return val_
| true | true |
f71428346cb81628139bf8ea3efe2d5d9ce53196 | 1,858 | py | Python | setup.py | infosmith/batteries | e1e018043392997ec43f55f874231d5feb684110 | [
"MIT"
] | null | null | null | setup.py | infosmith/batteries | e1e018043392997ec43f55f874231d5feb684110 | [
"MIT"
] | 2 | 2022-01-04T06:14:09.000Z | 2022-01-04T09:18:14.000Z | setup.py | infosmith/helpers | e1e018043392997ec43f55f874231d5feb684110 | [
"MIT"
] | null | null | null | """Package setup script."""
from setuptools import setup, find_packages

# Python packaging constants
CLASSIFIERS = [
    'Development Status :: 2 - Pre-Alpha',
    'Intended Audience :: Developers',
    'Natural Language :: English',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
]
# Maps human-readable license names to trove classifiers.
LICENSES = {
    'MIT license':
        'License :: OSI Approved :: MIT License',
    'BSD license':
        'License :: OSI Approved :: BSD License',
    'Apache Software License 2.0':
        'License :: OSI Approved :: Apache Software License',
    'GNU General Public License v3':
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'
}
REQUIREMENTS = {
    'install': [],
    'setup': ['pytest-runner'],
    'tests': ['pytest']
}
# Project constants
EMAIL = 'infosmith@prontonmail.com'
FULL_NAME = "David S."
GITHUB_ACCOUNT = 'infosmith'
LICENSE = 'MIT license'
PROJECT_SLUG = 'helpers'
PROJECT_SHORT_DESCRIPTION = 'Improved developer experience, accumulated.'
VERSION = '0.3.0'
# Project conditional configuration.
# BUGFIX: this used to be `if 'MIT license' in LICENSES.keys():`, which is
# always true and ignored the configured LICENSE; append the classifier for
# the *selected* license instead (identical result for the current LICENSE).
if LICENSE in LICENSES:
    CLASSIFIERS.append(LICENSES[LICENSE])
# Configure project
setup(
    author=FULL_NAME,
    author_email=EMAIL,
    classifiers=CLASSIFIERS,
    description=PROJECT_SHORT_DESCRIPTION,
    include_package_data=True,
    install_requires=REQUIREMENTS['install'],
    keywords=PROJECT_SLUG,
    license=LICENSE,
    name=PROJECT_SLUG,
    packages=find_packages(include=[PROJECT_SLUG]),
    setup_requires=REQUIREMENTS['setup'],
    test_suite='tests',
    tests_require=REQUIREMENTS['tests'],
    url="https://github.com/{}/{}".format(GITHUB_ACCOUNT, PROJECT_SLUG),
    version=VERSION,
    zip_safe=False,
)
| 29.03125 | 74 | 0.678149 |
from setuptools import setup, find_packages

# Trove classifiers describing the package.
CLASSIFIERS = [
    'Development Status :: 2 - Pre-Alpha',
    'Intended Audience :: Developers',
    'Natural Language :: English',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
]
# Maps human-readable license names to trove classifiers.
LICENSES = {
    'MIT license':
        'License :: OSI Approved :: MIT License',
    'BSD license':
        'License :: OSI Approved :: BSD License',
    'Apache Software License 2.0':
        'License :: OSI Approved :: Apache Software License',
    'GNU General Public License v3':
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'
}
REQUIREMENTS = {
    'install': [],
    'setup': ['pytest-runner'],
    'tests': ['pytest']
}
# Project constants.
EMAIL = 'infosmith@prontonmail.com'
FULL_NAME = "David S."
GITHUB_ACCOUNT = 'infosmith'
LICENSE = 'MIT license'
PROJECT_SLUG = 'helpers'
PROJECT_SHORT_DESCRIPTION = 'Improved developer experience, accumulated.'
VERSION = '0.3.0'
# BUGFIX: this used to be `if 'MIT license' in LICENSES.keys():`, which is
# always true and ignored the configured LICENSE; append the classifier for
# the *selected* license instead (identical result for the current LICENSE).
if LICENSE in LICENSES:
    CLASSIFIERS.append(LICENSES[LICENSE])
setup(
    author=FULL_NAME,
    author_email=EMAIL,
    classifiers=CLASSIFIERS,
    description=PROJECT_SHORT_DESCRIPTION,
    include_package_data=True,
    install_requires=REQUIREMENTS['install'],
    keywords=PROJECT_SLUG,
    license=LICENSE,
    name=PROJECT_SLUG,
    packages=find_packages(include=[PROJECT_SLUG]),
    setup_requires=REQUIREMENTS['setup'],
    test_suite='tests',
    tests_require=REQUIREMENTS['tests'],
    url="https://github.com/{}/{}".format(GITHUB_ACCOUNT, PROJECT_SLUG),
    version=VERSION,
    zip_safe=False,
)
| true | true |
f71428e49e034b578ef70ffe2a26d9dc7901f807 | 403 | py | Python | DiscordOauth2/wsgi.py | TShoKT/Django-DiscordOauth2 | 2deab89d4bbb0f36ef405e8f29f689525df479e2 | [
"MIT"
] | 4 | 2021-09-01T10:55:06.000Z | 2022-02-07T16:37:22.000Z | DiscordOauth2/wsgi.py | TShoKT/Django-DiscordOauth2 | 2deab89d4bbb0f36ef405e8f29f689525df479e2 | [
"MIT"
] | 1 | 2021-10-03T09:45:05.000Z | 2021-10-06T18:20:17.000Z | DiscordOauth2/wsgi.py | TShoKT/Django-DiscordOauth2 | 2deab89d4bbb0f36ef405e8f29f689525df479e2 | [
"MIT"
] | 3 | 2021-09-11T18:49:33.000Z | 2021-12-28T16:49:42.000Z | """
WSGI config for DiscordOauth2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the WSGI
# callable (setdefault is a no-op if the variable is already set).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DiscordOauth2.settings')
# Module-level WSGI entry point expected by WSGI servers.
application = get_wsgi_application()
| 23.705882 | 78 | 0.791563 |
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the WSGI
# callable (setdefault is a no-op if the variable is already set).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DiscordOauth2.settings')
# Module-level WSGI entry point expected by WSGI servers.
application = get_wsgi_application()
| true | true |
f7142941df2d1fb97ae8bcfca78c1126c1828d24 | 5,515 | py | Python | docs/cornell CS class/Lesson 29. Coroutines/demos/animate3.py | LizzieDeng/kalman_fliter_analysis | 50e728f32c496c3fcbb8ca3ee00857b999b88d99 | [
"MIT"
] | null | null | null | docs/cornell CS class/Lesson 29. Coroutines/demos/animate3.py | LizzieDeng/kalman_fliter_analysis | 50e728f32c496c3fcbb8ca3ee00857b999b88d99 | [
"MIT"
] | null | null | null | docs/cornell CS class/Lesson 29. Coroutines/demos/animate3.py | LizzieDeng/kalman_fliter_analysis | 50e728f32c496c3fcbb8ca3ee00857b999b88d99 | [
"MIT"
] | null | null | null | """
A module to show off a timed animation using coroutines
Making timed animations is messy, because we have to add a lot of
class attributes for all of the loop variables. A cleaner way is
to do this with coroutines. Each animation is its own coroutine.
The advantage of the coroutine is that yield allows you to pause
to let the game class draw. If you do not do that, then your
loop will keep going and you will never get a chance to draw. And
if you do not draw, there is no animation.
Author: Walker M. White (wmw2)
Date: November 20, 2019
"""
import introcs
import random
import math
from game2d import *
import time
import random
############# CONSTANTS #############
# Window Size
WINDOW_WIDTH = 512
WINDOW_HEIGHT = 512
# THE ANIMATION SPEED IN SECONDS
ANIMATION_SPEED = 1
############# CONTROLLER CLASS #############
class Animation(GameApp):
    """
    This class is an application to animate an image with the arrow keys

    At each step, the update() method checks for key input and either
    advances the running animation coroutine or starts a new one.

    Attribute view : the view (inherited from GameApp)
    Invariant: view is an instance of GView

    Attribute image: the image to animate
    Invariant: image is a GImage made from a PNG file
    """
    # Attribute _animator: A coroutine for performing an animation
    # Invariant: _animator is a generator-based coroutine (or None)

    # THE THREE MAIN METHODS
    def start(self):
        """
        Initializes the application, creating new attributes.
        """
        self.image = GImage(x=WINDOW_WIDTH/2,y=WINDOW_HEIGHT/2,source='Walker.png')
        self.image.angle = 0 # Doing this prevents a slow down due to initialization
        self._animator = None

    def update(self,dt):
        """
        Animates the image.

        Parameter dt: The time since the last animation frame.
        Precondition: dt is a float.
        """
        if self._animator is not None:       # We have something to animate
            try:
                self._animator.send(dt)      # Tell it how far to animate
            # BUGFIX: a finished coroutine signals completion by raising
            # StopIteration; the old bare `except:` also swallowed genuine
            # errors inside the coroutine (and even KeyboardInterrupt).
            except StopIteration:
                self._animator = None        # Stop animating
        elif self.input.is_key_down('left'):
            self._install_animator(self._animate_turn('left'))
        elif self.input.is_key_down('right'):
            self._install_animator(self._animate_turn('right'))
        elif self.input.is_key_down('up'):
            self._install_animator(self._animate_slide('up'))
        elif self.input.is_key_down('down'):
            self._install_animator(self._animate_slide('down'))

    def _install_animator(self,coroutine):
        """
        Installs a new animation coroutine and primes it to its first yield.
        """
        self._animator = coroutine
        next(self._animator)                 # Start up the animator

    def draw(self):
        """
        Draws the image
        """
        self.image.draw(self.view)

    def _animate_turn(self,direction):
        """
        Animates a 90 degree rotation of the image over ANIMATION_SPEED seconds.

        This coroutine yields between steps so the game loop can redraw;
        it receives the frame time dt through each (yield).

        Parameter direction: The direction to rotate.
        Precondition: direction is a string and one of 'left' or 'right'.
        """
        sangle = self.image.angle
        if direction == 'left':
            fangle = sangle+90
        else:
            fangle = sangle-90
        # Degrees per second
        steps = (fangle-sangle)/ANIMATION_SPEED
        animating = True
        while animating:
            # Get the time since the last draw
            dt = (yield)
            amount = steps*dt
            # Update the angle
            self.image.angle = self.image.angle+amount
            # If we go too far, clamp and stop animating
            if abs(self.image.angle-sangle) >= 90:
                self.image.angle = fangle
                animating = False

    def _animate_slide(self,direction):
        """
        Animates a vertical slide of the image over ANIMATION_SPEED seconds.

        This coroutine yields between steps so the game loop can redraw;
        it receives the frame time dt through each (yield).

        Parameter direction: The direction to slide.
        Precondition: direction is a string and one of 'up' or 'down'.
        """
        svert = self.image.y
        if direction == 'up':
            fvert = svert+self.image.height
        else:
            fvert = svert-self.image.height
        # Pixels per second
        steps = (fvert-svert)/ANIMATION_SPEED
        animating = True
        while animating:
            # Get the time since the last draw
            dt = (yield)
            amount = steps*dt
            # Update the vertical position
            self.image.y = self.image.y+amount
            # If we go too far, clamp and stop animating
            if abs(self.image.y-svert) >= self.image.height:
                self.image.y = fvert
                animating = False
# Application entry point: open a WINDOW_WIDTH x WINDOW_HEIGHT window and
# run the game loop at 60 frames per second.
if __name__ == '__main__':
    Animation(left=150,width=WINDOW_WIDTH,height=WINDOW_HEIGHT,fps=60.0).run()
| 32.827381 | 84 | 0.616863 |
import introcs
import random
import math
from game2d import *
import time
import random
next(self._animator)
    def draw(self):
        """Draws the image into the application's view."""
        self.image.draw(self.view)
    def _animate_turn(self,direction):
        """Coroutine rotating the image 90 degrees over ANIMATION_SPEED
        seconds; receives the frame time dt through each (yield).

        direction is 'left' (counter-clockwise) or anything else (clockwise).
        """
        sangle = self.image.angle
        if direction == 'left':
            fangle = sangle+90
        else:
            fangle = sangle-90
        # Degrees per second
        steps = (fangle-sangle)/ANIMATION_SPEED
        animating = True
        while animating:
            dt = (yield)
            amount = steps*dt
            self.image.angle = self.image.angle+amount
            # Clamp at the target angle once the full 90 degrees is covered
            if abs(self.image.angle-sangle) >= 90:
                self.image.angle = fangle
                animating = False
    def _animate_slide(self,direction):
        """Coroutine sliding the image one image-height up or down over
        ANIMATION_SPEED seconds; receives the frame time dt through each
        (yield).

        direction is 'up' or anything else (down).
        """
        svert = self.image.y
        if direction == 'up':
            fvert = svert+self.image.height
        else:
            fvert = svert-self.image.height
        # Pixels per second
        steps = (fvert-svert)/ANIMATION_SPEED
        animating = True
        while animating:
            dt = (yield)
            amount = steps*dt
            self.image.y = self.image.y+amount
            # Clamp at the target position once a full height is covered
            if abs(self.image.y-svert) >= self.image.height:
                self.image.y = fvert
                animating = False
# Entry point: run the animation demo at 60 frames per second.
if __name__ == '__main__':
    Animation(left=150,width=WINDOW_WIDTH,height=WINDOW_HEIGHT,fps=60.0).run()
| true | true |
f7142aa0459addf88df1549845b063ae44233e96 | 5,200 | py | Python | azure-devops/azext_devops/vstsCompressed/models/models.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | azure-devops/azext_devops/vstsCompressed/models/models.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | 37 | 2020-04-27T07:45:19.000Z | 2021-04-05T07:27:15.000Z | azure-devops/azext_devops/vstsCompressed/models/models.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ApiResourceLocation(Model):
    """ApiResourceLocation.

    Generated msrest serialization model; _attribute_map maps Python
    attribute names to their wire-format keys and types.
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'area': {'key': 'area', 'type': 'str'},
        'resource_name': {'key': 'resourceName', 'type': 'str'},
        'route_template': {'key': 'routeTemplate', 'type': 'str'},
        'resource_version': {'key': 'resourceVersion', 'type': 'int'},
        'min_version': {'key': 'minVersion', 'type': 'float'},
        'max_version': {'key': 'maxVersion', 'type': 'float'},
        'released_version': {'key': 'releasedVersion', 'type': 'str'},
    }

    def __init__(self, id=None, area=None, resource_name=None,
                 route_template=None, resource_version=None,
                 min_version=None, max_version=None,
                 released_version=None):
        super(ApiResourceLocation, self).__init__()
        self.id = id
        self.area = area
        self.resource_name = resource_name
        self.route_template = route_template
        self.resource_version = resource_version
        self.min_version = min_version
        self.max_version = max_version
        self.released_version = released_version
class ImproperException(Model):
    """ImproperException.

    Generated msrest serialization model for an exception payload.

    :param message:
    :type message: str
    """

    _attribute_map = {
        'message': {'key': 'Message', 'type': 'str'}
    }

    def __init__(self, message=None):
        super(ImproperException, self).__init__()
        self.message = message
class SystemException(Model):
    """SystemException.

    Generated msrest serialization model; inner_exception allows the
    server to nest a chained exception of the same shape.

    :param class_name:
    :type class_name: str
    :param inner_exception:
    :type inner_exception: :class:`SystemException <vsts.models.SystemException>`
    :param message:
    :type message: str
    """

    _attribute_map = {
        'class_name': {'key': 'ClassName', 'type': 'str'},
        'message': {'key': 'Message', 'type': 'str'},
        'inner_exception': {'key': 'InnerException', 'type': 'SystemException'}
    }

    def __init__(self, class_name=None, message=None, inner_exception=None):
        super(SystemException, self).__init__()
        self.class_name = class_name
        self.message = message
        self.inner_exception = inner_exception
class VssJsonCollectionWrapperBase(Model):
    """VssJsonCollectionWrapperBase.

    Generated msrest serialization model carrying only the element count
    of a JSON collection response.

    :param count:
    :type count: int
    """

    _attribute_map = {
        'count': {'key': 'count', 'type': 'int'}
    }

    def __init__(self, count=None):
        super(VssJsonCollectionWrapperBase, self).__init__()
        self.count = count
class WrappedException(Model):
    """WrappedException.

    Generated msrest serialization model for a service-side exception;
    inner_exception allows nested chained exceptions of the same shape.

    :param exception_id:
    :type exception_id: str
    :param inner_exception:
    :type inner_exception: :class:`WrappedException <vsts.models.WrappedException>`
    :param message:
    :type message: str
    :param type_name:
    :type type_name: str
    :param type_key:
    :type type_key: str
    :param error_code:
    :type error_code: int
    :param event_id:
    :type event_id: int
    :param custom_properties:
    :type custom_properties: dict
    """

    _attribute_map = {
        'exception_id': {'key': '$id', 'type': 'str'},
        'inner_exception': {'key': 'innerException', 'type': 'WrappedException'},
        'message': {'key': 'message', 'type': 'str'},
        'type_name': {'key': 'typeName', 'type': 'str'},
        'type_key': {'key': 'typeKey', 'type': 'str'},
        'error_code': {'key': 'errorCode', 'type': 'int'},
        'event_id': {'key': 'eventId', 'type': 'int'},
        'custom_properties': {'key': 'customProperties', 'type': '{object}'}
    }

    def __init__(self, exception_id=None, inner_exception=None, message=None,
                 type_name=None, type_key=None, error_code=None, event_id=None, custom_properties=None):
        super(WrappedException, self).__init__()
        self.exception_id = exception_id
        self.inner_exception = inner_exception
        self.message = message
        self.type_name = type_name
        self.type_key = type_key
        self.error_code = error_code
        self.event_id = event_id
        self.custom_properties = custom_properties
class VssJsonCollectionWrapper(VssJsonCollectionWrapperBase):
    """VssJsonCollectionWrapper.

    Generated msrest serialization model: a counted JSON collection
    response together with its payload.

    :param count:
    :type count: int
    :param value:
    :type value: object
    """

    _attribute_map = {
        'count': {'key': 'count', 'type': 'int'},
        'value': {'key': 'value', 'type': 'object'}
    }

    def __init__(self, count=None, value=None):
        super(VssJsonCollectionWrapper, self).__init__(count=count)
        self.value = value
| 32.911392 | 104 | 0.59 |
from msrest.serialization import Model
class ApiResourceLocation(Model):
    """Generated msrest serialization model describing where a versioned
    REST resource lives (area, route template, version range)."""
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'area': {'key': 'area', 'type': 'str'},
        'resource_name': {'key': 'resourceName', 'type': 'str'},
        'route_template': {'key': 'routeTemplate', 'type': 'str'},
        'resource_version': {'key': 'resourceVersion', 'type': 'int'},
        'min_version': {'key': 'minVersion', 'type': 'float'},
        'max_version': {'key': 'maxVersion', 'type': 'float'},
        'released_version': {'key': 'releasedVersion', 'type': 'str'},
    }
    def __init__(self, id=None, area=None, resource_name=None,
                 route_template=None, resource_version=None,
                 min_version=None, max_version=None,
                 released_version=None):
        super(ApiResourceLocation, self).__init__()
        self.id = id
        self.area = area
        self.resource_name = resource_name
        self.route_template = route_template
        self.resource_version = resource_version
        self.min_version = min_version
        self.max_version = max_version
        self.released_version = released_version
class ImproperException(Model):
    """Generated msrest serialization model for an exception payload
    carrying only a message string."""
    _attribute_map = {
        'message': {'key': 'Message', 'type': 'str'}
    }
    def __init__(self, message=None):
        super(ImproperException, self).__init__()
        self.message = message
class SystemException(Model):
    """Generated msrest serialization model for a system exception;
    inner_exception nests a chained exception of the same shape."""
    _attribute_map = {
        'class_name': {'key': 'ClassName', 'type': 'str'},
        'message': {'key': 'Message', 'type': 'str'},
        'inner_exception': {'key': 'InnerException', 'type': 'SystemException'}
    }
    def __init__(self, class_name=None, message=None, inner_exception=None):
        super(SystemException, self).__init__()
        self.class_name = class_name
        self.message = message
        self.inner_exception = inner_exception
class VssJsonCollectionWrapperBase(Model):
    """Generated msrest serialization model carrying the element count of
    a JSON collection response."""
    _attribute_map = {
        'count': {'key': 'count', 'type': 'int'}
    }
    def __init__(self, count=None):
        super(VssJsonCollectionWrapperBase, self).__init__()
        self.count = count
class WrappedException(Model):
    """Generated msrest serialization model for a service-side exception;
    inner_exception nests chained exceptions of the same shape."""
    _attribute_map = {
        'exception_id': {'key': '$id', 'type': 'str'},
        'inner_exception': {'key': 'innerException', 'type': 'WrappedException'},
        'message': {'key': 'message', 'type': 'str'},
        'type_name': {'key': 'typeName', 'type': 'str'},
        'type_key': {'key': 'typeKey', 'type': 'str'},
        'error_code': {'key': 'errorCode', 'type': 'int'},
        'event_id': {'key': 'eventId', 'type': 'int'},
        'custom_properties': {'key': 'customProperties', 'type': '{object}'}
    }
    def __init__(self, exception_id=None, inner_exception=None, message=None,
                 type_name=None, type_key=None, error_code=None, event_id=None, custom_properties=None):
        super(WrappedException, self).__init__()
        self.exception_id = exception_id
        self.inner_exception = inner_exception
        self.message = message
        self.type_name = type_name
        self.type_key = type_key
        self.error_code = error_code
        self.event_id = event_id
        self.custom_properties = custom_properties
class VssJsonCollectionWrapper(VssJsonCollectionWrapperBase):
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'value': {'key': 'value', 'type': 'object'}
}
def __init__(self, count=None, value=None):
super(VssJsonCollectionWrapper, self).__init__(count=count)
self.value = value
| true | true |
f7142b7c798ada6e8b0a89e5f7341c459d5053a1 | 6,028 | py | Python | client.py | ElianMariano/Sistema-de-Trocas | a1caf6a1503cfa7a81fd48fbe81f19fd91f0eb3c | [
"MIT"
] | null | null | null | client.py | ElianMariano/Sistema-de-Trocas | a1caf6a1503cfa7a81fd48fbe81f19fd91f0eb3c | [
"MIT"
] | null | null | null | client.py | ElianMariano/Sistema-de-Trocas | a1caf6a1503cfa7a81fd48fbe81f19fd91f0eb3c | [
"MIT"
] | null | null | null | #from lib2to3.pytree import convert
import socket
import sys
import _thread
import json
import os
import time
import zmq
IP_ADDRESS = '127.0.0.1'
TOPIC = None
fila_msgs = []
conf = []
# Envia os dados
def enviar():
ctx = zmq.Context()
sock = ctx.socket(zmq.PUB)
sock.connect(f"tcp://{IP_ADDRESS}:5500")
codigo = 5
# Executa uma acao de acordo com o codigo informado
while True:
if(len(fila_msgs) == 0):
pass
else:
data = fila_msgs.pop(0)
data_converted = json.loads(data)
codigo = data_converted['codigo']
if(codigo == 1):
msg_json = data
TOPIC = 'login'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
if (codigo == 2):
msg_json = data
TOPIC = 'cadastrar'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
if codigo == 4 :
msg_json = data
TOPIC = 'usuario'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
if codigo == 9:
msg_json = data
TOPIC = 'pedirListaAnuncios'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
if codigo == 10:
msg_json = data
TOPIC = 'anuncio'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
# Recebe a confirmacao do broker
def receberConfirmacao():
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect(f"tcp://{IP_ADDRESS}:5501")
while True:
TOPIC = 'confirmacao'
sock.subscribe(f"{TOPIC}")
msg_string = sock.recv_string()
msg_json = sock.recv_json()
#print(msg_json)
# Dados da confirmacao
data = msg_json
data_converted = json.loads(data)
codigo = data_converted['codigo']
codigo2 = data_converted['codigo2']
confirmacao = data_converted['confirmacao']
# Adiciona a confirmacao
conf.append(confirmacao)
# Recebe a lista de anuncios
def receberAnuncios():
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect(f"tcp://{IP_ADDRESS}:5501")
while True:
# Recebe os dados do usuario
TOPIC = 'anuncios'
sock.subscribe(f"{TOPIC}")
msg_string = sock.recv_string()
msg_json = sock.recv_json()
#print(msg_json)
# Mostra os dados do usuario
data = msg_json
anuncios = json.loads(data)
os.system('clear') or None
for anuncio in anuncios:
print("================================")
print('Anuncio ID: ', anuncio['id'])
print('Produto ID: ', anuncio['produto_id'])
print('Descricao: ', anuncio['descricao'])
print('De cliente: ', anuncio['de_cliente'])
print('Data: ', anuncio['data'])
print("================================")
# Recebe o perfil
def verPerfil():
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect(f"tcp://{IP_ADDRESS}:5501")
while True:
# Recebe os dados do usuario
TOPIC = 'dados_usuario'
sock.subscribe(f"{TOPIC}")
msg_string = sock.recv_string()
msg_json = sock.recv_json()
#print(msg_json)
# Mostra os dados do usuario
data = msg_json
converted = json.loads(data)
nome = converted['nome']
dataNasc = converted['nascimento']
cpf = converted['cpf']
email = converted['email']
senha = converted['senha']
os.system('clear') or None
print("================================")
print("Nome : " + nome)
print("Data de Nascimento : " + dataNasc)
print("CPF : " + cpf)
print("Email : " + email)
print("Senha : " + senha)
print("================================")
# Roda o menu
def client():
_thread.start_new_thread(enviar,())
_thread.start_new_thread(receberConfirmacao,())
_thread.start_new_thread(verPerfil,())
ri = 'nao'
ctx = zmq.Context()
sock = ctx.socket(zmq.PUB)
sock.connect(f"tcp://{IP_ADDRESS}:5500")
opc = None
#time.sleep(20)
while opc != "4" :
os.system('clear') or None
print("================================")
print(" 1 - Logar")
print(" 2 - Criar Conta")
print(" 4 - Sair")
print("================================")
opc = input('Digite uma Opcao: ')
if opc == '1' :
os.system('clear') or None
email = input("Digite o email: ")
senha = input("Digite a senha: ")
msg= {}
msg ['codigo'] = 1
msg ['codigo2'] = 1
msg ['email'] = email
msg ['senha'] = senha
msg_json = json.dumps(msg)
fila_msgs.append(msg_json)
if opc == '2':
os.system('clear') or None
nome = input("Digite o seu nome: ")
nascimento = input("Digite sua data Nascimento: ")
endereco = input("Digite seu endereço: ")
cpf = input("Digite seu cpf: ")
email = input("Digite seu Email: ")
senha = input("Digite sua senha: ")
msg= {}
msg ['codigo'] = 2
msg ['codigo2'] = 2
msg ['nome'] = nome
msg ['nascimento'] = nascimento
msg ['endereco'] = endereco
msg ['cpf'] = cpf
msg ['email'] = email
msg ['senha'] = senha
msg_json = json.dumps(msg)
fila_msgs.append(msg_json)
if __name__ == "__main__":
client() | 30.291457 | 62 | 0.503152 |
import socket
import sys
import _thread
import json
import os
import time
import zmq
IP_ADDRESS = '127.0.0.1'
TOPIC = None
fila_msgs = []
conf = []
def enviar():
ctx = zmq.Context()
sock = ctx.socket(zmq.PUB)
sock.connect(f"tcp://{IP_ADDRESS}:5500")
codigo = 5
while True:
if(len(fila_msgs) == 0):
pass
else:
data = fila_msgs.pop(0)
data_converted = json.loads(data)
codigo = data_converted['codigo']
if(codigo == 1):
msg_json = data
TOPIC = 'login'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
if (codigo == 2):
msg_json = data
TOPIC = 'cadastrar'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
if codigo == 4 :
msg_json = data
TOPIC = 'usuario'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
if codigo == 9:
msg_json = data
TOPIC = 'pedirListaAnuncios'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
if codigo == 10:
msg_json = data
TOPIC = 'anuncio'
sock.send_string(f"{TOPIC}", flags=zmq.SNDMORE)
sock.send_json(msg_json)
codigo = 5
def receberConfirmacao():
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect(f"tcp://{IP_ADDRESS}:5501")
while True:
TOPIC = 'confirmacao'
sock.subscribe(f"{TOPIC}")
msg_string = sock.recv_string()
msg_json = sock.recv_json()
data = msg_json
data_converted = json.loads(data)
codigo = data_converted['codigo']
codigo2 = data_converted['codigo2']
confirmacao = data_converted['confirmacao']
conf.append(confirmacao)
def receberAnuncios():
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect(f"tcp://{IP_ADDRESS}:5501")
while True:
TOPIC = 'anuncios'
sock.subscribe(f"{TOPIC}")
msg_string = sock.recv_string()
msg_json = sock.recv_json()
data = msg_json
anuncios = json.loads(data)
os.system('clear') or None
for anuncio in anuncios:
print("================================")
print('Anuncio ID: ', anuncio['id'])
print('Produto ID: ', anuncio['produto_id'])
print('Descricao: ', anuncio['descricao'])
print('De cliente: ', anuncio['de_cliente'])
print('Data: ', anuncio['data'])
print("================================")
def verPerfil():
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect(f"tcp://{IP_ADDRESS}:5501")
while True:
TOPIC = 'dados_usuario'
sock.subscribe(f"{TOPIC}")
msg_string = sock.recv_string()
msg_json = sock.recv_json()
data = msg_json
converted = json.loads(data)
nome = converted['nome']
dataNasc = converted['nascimento']
cpf = converted['cpf']
email = converted['email']
senha = converted['senha']
os.system('clear') or None
print("================================")
print("Nome : " + nome)
print("Data de Nascimento : " + dataNasc)
print("CPF : " + cpf)
print("Email : " + email)
print("Senha : " + senha)
print("================================")
def client():
_thread.start_new_thread(enviar,())
_thread.start_new_thread(receberConfirmacao,())
_thread.start_new_thread(verPerfil,())
ri = 'nao'
ctx = zmq.Context()
sock = ctx.socket(zmq.PUB)
sock.connect(f"tcp://{IP_ADDRESS}:5500")
opc = None
while opc != "4" :
os.system('clear') or None
print("================================")
print(" 1 - Logar")
print(" 2 - Criar Conta")
print(" 4 - Sair")
print("================================")
opc = input('Digite uma Opcao: ')
if opc == '1' :
os.system('clear') or None
email = input("Digite o email: ")
senha = input("Digite a senha: ")
msg= {}
msg ['codigo'] = 1
msg ['codigo2'] = 1
msg ['email'] = email
msg ['senha'] = senha
msg_json = json.dumps(msg)
fila_msgs.append(msg_json)
if opc == '2':
os.system('clear') or None
nome = input("Digite o seu nome: ")
nascimento = input("Digite sua data Nascimento: ")
endereco = input("Digite seu endereço: ")
cpf = input("Digite seu cpf: ")
email = input("Digite seu Email: ")
senha = input("Digite sua senha: ")
msg= {}
msg ['codigo'] = 2
msg ['codigo2'] = 2
msg ['nome'] = nome
msg ['nascimento'] = nascimento
msg ['endereco'] = endereco
msg ['cpf'] = cpf
msg ['email'] = email
msg ['senha'] = senha
msg_json = json.dumps(msg)
fila_msgs.append(msg_json)
if __name__ == "__main__":
client() | true | true |
f7142c06ba23c1277d4a118307c17e2599d7a4f9 | 8,434 | py | Python | third_party/tlslite/tlslite/utils/cryptomath.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575 | 2015-06-18T23:58:20.000Z | 2022-03-23T09:32:39.000Z | third_party/tlslite/tlslite/utils/cryptomath.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | third_party/tlslite/tlslite/utils/cryptomath.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52 | 2015-07-14T10:40:50.000Z | 2022-03-15T01:11:49.000Z | # Authors:
# Trevor Perrin
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
# See the LICENSE file for legal information regarding use of this file.
"""cryptomath module
This module has basic math/crypto code."""
from __future__ import print_function
import os
import math
import base64
import binascii
from .compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Try to load M2Crypto/OpenSSL
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
#Try to load GMPY
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
#Try to load pycrypto
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Check that os.urandom works
import zlib
length = len(zlib.compress(os.urandom(1000)))
assert(length > 900)
def getRandomBytes(howMany):
b = bytearray(os.urandom(howMany))
assert(len(b) == howMany)
return b
prngName = "os.urandom"
# **************************************************************************
# Simple hash functions
# **************************************************************************
import hmac
import hashlib
def MD5(b):
return bytearray(hashlib.md5(compat26Str(b)).digest())
def SHA1(b):
return bytearray(hashlib.sha1(compat26Str(b)).digest())
def SHA256(b):
return bytearray(hashlib.sha256(compat26Str(b)).digest())
def HMAC_MD5(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.md5).digest())
def HMAC_SHA1(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.sha1).digest())
def HMAC_SHA256(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.sha256).digest())
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(b):
total = 0
multiplier = 1
for count in range(len(b)-1, -1, -1):
byte = b[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToByteArray(n, howManyBytes=None):
"""Convert an integer into a bytearray, zero-pad to howManyBytes.
The returned bytearray may be smaller than howManyBytes, but will
not be larger. The returned bytearray will contain a big-endian
encoding of the input integer (n).
"""
if howManyBytes == None:
howManyBytes = numBytes(n)
b = bytearray(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
b[count] = int(n % 256)
n >>= 8
return b
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
raise AssertionError()
b = bytearray(mpi[4:])
return bytesToNumber(b)
def numberToMPI(n):
b = numberToByteArray(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
b = bytearray(4+ext) + b
b[0] = (length >> 24) & 0xFF
b[1] = (length >> 16) & 0xFF
b[2] = (length >> 8) & 0xFF
b[3] = length & 0xFF
return bytes(b)
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
return int(math.floor(math.log(n, 2))+1)
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) // gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
q = d // c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
if gmpyLoaded:
def powMod(base, power, modulus):
base = gmpy.mpz(base)
power = gmpy.mpz(power)
modulus = gmpy.mpz(modulus)
result = pow(base, power, modulus)
return long(result)
else:
def powMod(base, power, modulus):
if power < 0:
result = pow(base, power*-1, modulus)
result = invMod(result, modulus)
return result
else:
return pow(base, power, modulus)
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
sieve = list(range(n))
for count in range(2, int(math.sqrt(n))+1):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print("*", end=' ')
s, t = n-1, 0
while s % 2 == 0:
s, t = s//2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = ((2 ** (bits-1)) * 3) // 2
high = 2 ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print(".", end=' ')
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3//2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print(".", end=' ')
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
| 26.945687 | 76 | 0.509248 |
from __future__ import print_function
import os
import math
import base64
import binascii
from .compat import *
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
import zlib
length = len(zlib.compress(os.urandom(1000)))
assert(length > 900)
def getRandomBytes(howMany):
b = bytearray(os.urandom(howMany))
assert(len(b) == howMany)
return b
prngName = "os.urandom"
import hmac
import hashlib
def MD5(b):
return bytearray(hashlib.md5(compat26Str(b)).digest())
def SHA1(b):
return bytearray(hashlib.sha1(compat26Str(b)).digest())
def SHA256(b):
return bytearray(hashlib.sha256(compat26Str(b)).digest())
def HMAC_MD5(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.md5).digest())
def HMAC_SHA1(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.sha1).digest())
def HMAC_SHA256(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.sha256).digest())
def bytesToNumber(b):
total = 0
multiplier = 1
for count in range(len(b)-1, -1, -1):
byte = b[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToByteArray(n, howManyBytes=None):
if howManyBytes == None:
howManyBytes = numBytes(n)
b = bytearray(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
b[count] = int(n % 256)
n >>= 8
return b
def mpiToNumber(mpi):
if (ord(mpi[4]) & 0x80) !=0:
raise AssertionError()
b = bytearray(mpi[4:])
return bytesToNumber(b)
def numberToMPI(n):
b = numberToByteArray(n)
ext = 0
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
b = bytearray(4+ext) + b
b[0] = (length >> 24) & 0xFF
b[1] = (length >> 16) & 0xFF
b[2] = (length >> 8) & 0xFF
b[3] = length & 0xFF
return bytes(b)
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
return int(math.floor(math.log(n, 2))+1)
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) // gcd(a, b)
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
q = d // c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
if gmpyLoaded:
def powMod(base, power, modulus):
base = gmpy.mpz(base)
power = gmpy.mpz(power)
modulus = gmpy.mpz(modulus)
result = pow(base, power, modulus)
return long(result)
else:
def powMod(base, power, modulus):
if power < 0:
result = pow(base, power*-1, modulus)
result = invMod(result, modulus)
return result
else:
return pow(base, power, modulus)
def makeSieve(n):
sieve = list(range(n))
for count in range(2, int(math.sqrt(n))+1):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
for x in sieve:
if x >= n: return True
if n % x == 0: return False
if display: print("*", end=' ')
s, t = n-1, 0
while s % 2 == 0:
s, t = s//2, t+1
a = 2
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#29 % 30 and keep them there
low = ((2 ** (bits-1)) * 3) // 2
high = 2 ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print(".", end=' ')
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
low = (2 ** (bits-2)) * 3//2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print(".", end=' ')
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
| true | true |
f7142c82611ea8aa0b9d89d7f852e92f1eb6cabd | 2,892 | py | Python | implementation/server/factories/files.py | Aincient/cleo | 933ef372fa7847d943206d72bfb03c201dbafbd6 | [
"Apache-2.0"
] | null | null | null | implementation/server/factories/files.py | Aincient/cleo | 933ef372fa7847d943206d72bfb03c201dbafbd6 | [
"Apache-2.0"
] | null | null | null | implementation/server/factories/files.py | Aincient/cleo | 933ef372fa7847d943206d72bfb03c201dbafbd6 | [
"Apache-2.0"
] | 3 | 2018-10-01T12:04:36.000Z | 2021-01-07T09:30:50.000Z | """
Files for testing.
"""
import base64
import tempfile
from PIL import Image
from six import BytesIO
__all__ = (
'BASE64_PREFIX',
'TEMPORARY_FILE_LIST',
'TEMPORARY_FILE_LIST_FILE_CONTENT',
'TEMPORARY_FILE_LIST_FILE_BASE64',
'TEMPORARY_FILE_VIEW',
'TEMPORARY_FILE_VIEW_FILE_CONTENT',
'TEMPORARY_FILE_VIEW_FILE_BASE64',
'TEMPORARY_FILE_ADD',
'TEMPORARY_FILE_ADD_FILE_CONTENT',
'TEMPORARY_FILE_ADD_FILE_BASE64',
'TEMPORARY_FILE_CHANGE',
'TEMPORARY_FILE_CHANGE_FILE_CONTENT',
'TEMPORARY_FILE_CHANGE_FILE_BASE64',
'TEMPORARY_FILE_CHANGE_CHANGED',
'TEMPORARY_FILE_CHANGE_CHANGED_FILE_CONTENT',
'TEMPORARY_FILE_CHANGE_CHANGED_FILE_BASE64',
'TEMPORARY_FILE_DELETE',
'TEMPORARY_FILE_DELETE_FILE_CONTENT',
'TEMPORARY_FILE_DELETE_FILE_BASE64',
)
def get_temporary_file(prefix):
"""Get a temporary file.
:return:
"""
image = Image.new('RGBA', size=(100, 100), color=(256, 0, 0))
tmp_file = BytesIO()
_tmp_file = tempfile.NamedTemporaryFile(prefix=prefix, suffix='.png')
image.save(tmp_file, "PNG")
tmp_file.seek(0)
tmp_file.name = _tmp_file.name
return tmp_file
BASE64_PREFIX = 'data:image/png;base64,'
TEMPORARY_FILE_LIST = get_temporary_file(prefix='LIST')
TEMPORARY_FILE_LIST_FILE_CONTENT = TEMPORARY_FILE_LIST.read()
TEMPORARY_FILE_LIST_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_LIST_FILE_CONTENT
).decode()
TEMPORARY_FILE_LIST.seek(0)
TEMPORARY_FILE_VIEW = get_temporary_file(prefix='VIEW')
TEMPORARY_FILE_VIEW_FILE_CONTENT = TEMPORARY_FILE_VIEW.read()
TEMPORARY_FILE_VIEW_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_VIEW_FILE_CONTENT
).decode()
TEMPORARY_FILE_VIEW.seek(0)
TEMPORARY_FILE_ADD = get_temporary_file(prefix='ADD')
TEMPORARY_FILE_ADD_FILE_CONTENT = TEMPORARY_FILE_ADD.read()
TEMPORARY_FILE_ADD_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_ADD_FILE_CONTENT
).decode()
TEMPORARY_FILE_ADD.seek(0)
TEMPORARY_FILE_CHANGE = get_temporary_file(prefix='CHANGE')
TEMPORARY_FILE_CHANGE_FILE_CONTENT = TEMPORARY_FILE_CHANGE.read()
TEMPORARY_FILE_CHANGE_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_CHANGE_FILE_CONTENT
).decode()
TEMPORARY_FILE_CHANGE.seek(0)
TEMPORARY_FILE_CHANGE_CHANGED = get_temporary_file(prefix='CHANGE_CHANGED')
TEMPORARY_FILE_CHANGE_CHANGED_FILE_CONTENT = \
TEMPORARY_FILE_CHANGE_CHANGED.read()
TEMPORARY_FILE_CHANGE_CHANGED_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_CHANGE_CHANGED_FILE_CONTENT
).decode()
TEMPORARY_FILE_CHANGE_CHANGED.seek(0)
TEMPORARY_FILE_DELETE = get_temporary_file(prefix='DELETE')
TEMPORARY_FILE_DELETE_FILE_CONTENT = TEMPORARY_FILE_DELETE.read()
TEMPORARY_FILE_DELETE_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_DELETE_FILE_CONTENT
).decode()
TEMPORARY_FILE_DELETE.seek(0)
| 31.434783 | 77 | 0.802213 |
import base64
import tempfile
from PIL import Image
from six import BytesIO
__all__ = (
'BASE64_PREFIX',
'TEMPORARY_FILE_LIST',
'TEMPORARY_FILE_LIST_FILE_CONTENT',
'TEMPORARY_FILE_LIST_FILE_BASE64',
'TEMPORARY_FILE_VIEW',
'TEMPORARY_FILE_VIEW_FILE_CONTENT',
'TEMPORARY_FILE_VIEW_FILE_BASE64',
'TEMPORARY_FILE_ADD',
'TEMPORARY_FILE_ADD_FILE_CONTENT',
'TEMPORARY_FILE_ADD_FILE_BASE64',
'TEMPORARY_FILE_CHANGE',
'TEMPORARY_FILE_CHANGE_FILE_CONTENT',
'TEMPORARY_FILE_CHANGE_FILE_BASE64',
'TEMPORARY_FILE_CHANGE_CHANGED',
'TEMPORARY_FILE_CHANGE_CHANGED_FILE_CONTENT',
'TEMPORARY_FILE_CHANGE_CHANGED_FILE_BASE64',
'TEMPORARY_FILE_DELETE',
'TEMPORARY_FILE_DELETE_FILE_CONTENT',
'TEMPORARY_FILE_DELETE_FILE_BASE64',
)
def get_temporary_file(prefix):
image = Image.new('RGBA', size=(100, 100), color=(256, 0, 0))
tmp_file = BytesIO()
_tmp_file = tempfile.NamedTemporaryFile(prefix=prefix, suffix='.png')
image.save(tmp_file, "PNG")
tmp_file.seek(0)
tmp_file.name = _tmp_file.name
return tmp_file
BASE64_PREFIX = 'data:image/png;base64,'
TEMPORARY_FILE_LIST = get_temporary_file(prefix='LIST')
TEMPORARY_FILE_LIST_FILE_CONTENT = TEMPORARY_FILE_LIST.read()
TEMPORARY_FILE_LIST_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_LIST_FILE_CONTENT
).decode()
TEMPORARY_FILE_LIST.seek(0)
TEMPORARY_FILE_VIEW = get_temporary_file(prefix='VIEW')
TEMPORARY_FILE_VIEW_FILE_CONTENT = TEMPORARY_FILE_VIEW.read()
TEMPORARY_FILE_VIEW_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_VIEW_FILE_CONTENT
).decode()
TEMPORARY_FILE_VIEW.seek(0)
TEMPORARY_FILE_ADD = get_temporary_file(prefix='ADD')
TEMPORARY_FILE_ADD_FILE_CONTENT = TEMPORARY_FILE_ADD.read()
TEMPORARY_FILE_ADD_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_ADD_FILE_CONTENT
).decode()
TEMPORARY_FILE_ADD.seek(0)
TEMPORARY_FILE_CHANGE = get_temporary_file(prefix='CHANGE')
TEMPORARY_FILE_CHANGE_FILE_CONTENT = TEMPORARY_FILE_CHANGE.read()
TEMPORARY_FILE_CHANGE_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_CHANGE_FILE_CONTENT
).decode()
TEMPORARY_FILE_CHANGE.seek(0)
TEMPORARY_FILE_CHANGE_CHANGED = get_temporary_file(prefix='CHANGE_CHANGED')
TEMPORARY_FILE_CHANGE_CHANGED_FILE_CONTENT = \
TEMPORARY_FILE_CHANGE_CHANGED.read()
TEMPORARY_FILE_CHANGE_CHANGED_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_CHANGE_CHANGED_FILE_CONTENT
).decode()
TEMPORARY_FILE_CHANGE_CHANGED.seek(0)
TEMPORARY_FILE_DELETE = get_temporary_file(prefix='DELETE')
TEMPORARY_FILE_DELETE_FILE_CONTENT = TEMPORARY_FILE_DELETE.read()
TEMPORARY_FILE_DELETE_FILE_BASE64 = BASE64_PREFIX + base64.b64encode(
TEMPORARY_FILE_DELETE_FILE_CONTENT
).decode()
TEMPORARY_FILE_DELETE.seek(0)
| true | true |
f7142cca0f48800d8b25507f5d1b79a5a49af070 | 245 | py | Python | simdata/hakata/script/dummy_db.py | RDC4Smart-Mobility/UniSim | 872a22ccdac859b9a12f11a9f5d20467e9db18ee | [
"MIT"
] | null | null | null | simdata/hakata/script/dummy_db.py | RDC4Smart-Mobility/UniSim | 872a22ccdac859b9a12f11a9f5d20467e9db18ee | [
"MIT"
] | null | null | null | simdata/hakata/script/dummy_db.py | RDC4Smart-Mobility/UniSim | 872a22ccdac859b9a12f11a9f5d20467e9db18ee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from unisim import DB
class DummyDB(DB):
def connect(self):
pass
def disconnect(self):
pass
def init_table(self):
pass
def store(self, tick, objects):
pass
| 14.411765 | 35 | 0.526531 |
from unisim import DB
class DummyDB(DB):
def connect(self):
pass
def disconnect(self):
pass
def init_table(self):
pass
def store(self, tick, objects):
pass
| true | true |
f7142d1dd2c3894eb628d06b70747641aac633ec | 7,231 | py | Python | paramunittest.py | rik0/ParamUnittest | e064fb382c6da355ae7242e79ea1bf14fb2b43e9 | [
"BSD-2-Clause"
] | 7 | 2016-03-17T07:34:39.000Z | 2019-08-09T05:31:38.000Z | paramunittest.py | rik0/ParamUnittest | e064fb382c6da355ae7242e79ea1bf14fb2b43e9 | [
"BSD-2-Clause"
] | 2 | 2015-01-18T03:35:14.000Z | 2017-03-27T18:11:41.000Z | paramunittest.py | rik0/ParamUnittest | e064fb382c6da355ae7242e79ea1bf14fb2b43e9 | [
"BSD-2-Clause"
] | 4 | 2015-10-23T07:42:31.000Z | 2021-01-15T02:28:11.000Z | # Copyright 2012 Enrico Franchi
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import copy
import importlib
import unittest

# ``collections.Mapping`` / ``collections.Sequence`` were removed from the
# top-level ``collections`` namespace in Python 3.10; import the ABCs from
# ``collections.abc`` with a fallback for very old interpreters.
try:
    from collections.abc import Mapping, Sequence
except ImportError:  # Python < 3.3
    from collections import Mapping, Sequence
# Public API: the ``parametrized`` class decorator and the base class its
# generated test cases derive from.
__all__ = [
    'parametrized',
    'ParametrizedTestCase',
]
def _process_parameters(parameters_seq):
processed_parameters_seq = []
for parameters in parameters_seq:
if isinstance(parameters, collections.Mapping):
processed_parameters_seq.append((tuple(),
dict(parameters)))
elif (len(parameters) == 2
and isinstance(parameters[0], collections.Sequence)
and isinstance(parameters[1], collections.Mapping)):
processed_parameters_seq.append((tuple(parameters[0]),
dict(parameters[1])))
else:
processed_parameters_seq.append((tuple(parameters),
dict()))
return processed_parameters_seq
def _build_name(name, index):
return '%s_%d' % (name, index)
def strclass(cls):
    """Return the fully qualified ``module.ClassName`` string for *cls*."""
    return '.'.join((cls.__module__, cls.__name__))
class ParametrizedTestCase(unittest.TestCase):
    """Base class for parametrized test cases.

    Subclasses implement :meth:`setParameters`; the accessor methods below
    are stubs that the ``parametrized`` decorator patches onto each generated
    subclass.  (The original defined ``getFullParametersSequence`` twice;
    the dead first definition has been removed.)
    """
    def setParameters(self, *args, **kwargs):
        """Receive one parameter set; must be overridden by subclasses."""
        raise NotImplementedError(
            ('setParameters must be implemented '
             'because it receives the parameters.'))
    def getParameters(self):
        """
        Return the parameters with which this test case was instantiated.
        """
        raise NotImplementedError(
            'getParameters should have been patched by parametrized.')
    def getTestCaseIndex(self):
        """
        Return the index of the current test case according to the list of
        parameters passed to parametrized.
        """
        raise NotImplementedError(
            'getTestCaseIndex should have been patched by parametrized.')
    def getFullParametersSequence(self):
        """
        Return the full normalized list of parameters passed to parametrized.
        """
        raise NotImplementedError(
            'getFullParametersSequence should have been patched by parametrized.')
    def __str__(self):
        # Fall back to a placeholder rendering when the accessors have not
        # been patched (i.e. the class was not decorated with parametrized).
        try:
            return "%s[%d](%s) (%s)" % (self._testMethodName,
                                        self.getTestCaseIndex(),
                                        self.getParameters(),
                                        strclass(self.__class__))
        except NotImplementedError:
            return "%s[...](...) (%s)" % (self._testMethodName,
                                          strclass(self.__class__))
    def __repr__(self):
        try:
            return "<%s[%d](%s) testMethod=%s>" % (strclass(self.__class__),
                                                   self.getTestCaseIndex(),
                                                   self.getParameters(),
                                                   self._testMethodName)
        except NotImplementedError:
            return "<%s[...](...) testMethod=%s>" % (strclass(self.__class__),
                                                     self._testMethodName)
class PropagateSetAttr(type):
    # Metaclass that forwards class-attribute assignments to a list of
    # observer classes.  Note that super().__setattr__ is never called, so
    # the attribute is NOT set on the propagator class itself -- presumably
    # intentional so the stand-in class stays inert.
    # NOTE(review): confirm dropping the local assignment is intended.
    def __new__(mcs, name, bases, dct):
        # Give every class created with this metaclass its own observer list.
        dct['setattr_observers'] = []
        cls = super(PropagateSetAttr, mcs).__new__(mcs, name, bases, dct)
        return cls
    def __setattr__(cls, key, value):
        # Broadcast the assignment to every registered observer class.
        for observer in cls.setattr_observers:
            setattr(observer, key, value)
def make_propagator(cls, setattr_observers):
    # Build a dummy TestCase class whose class-attribute writes are
    # broadcast to *setattr_observers* (the generated parametrized
    # subclasses), e.g. so skip decorations reach every variant.
    # NOTE(review): the *cls* argument is accepted but never used here --
    # confirm whether the propagator was meant to inherit from it.
    SkippableTest = PropagateSetAttr('SkippableTest', (unittest.TestCase,),
                                     {})
    SkippableTest.setattr_observers.extend(setattr_observers)
    return SkippableTest
def parametrized(*parameters_seq):
    # Class-decorator factory: given N parameter specs, generate N subclasses
    # of the decorated TestCase (one per spec), install them in the decorated
    # class's module so unittest discovery picks them up, and return a
    # propagator stand-in for the original class.
    parameters_seq = _process_parameters(parameters_seq)
    def magic_module_set_test_case(cls):
        if not hasattr(cls, 'setParameters'):
            raise TypeError('%s does not have a setParameters method.' % (
                cls.__name__, ))
        # Module object where the generated classes will be registered.
        module = importlib.import_module(cls.__module__)
        generated_test_cases = []
        for index, parameters in enumerate(parameters_seq):
            name = _build_name(cls.__name__, index)
            # Default-argument trick: bind the current loop values so each
            # generated class closes over its own (parameters, index) pair.
            def closing_over(parameters=parameters, index=index):
                def setUp(self):
                    # Inject the parameters before the user's own setUp runs.
                    self.setParameters(*parameters[0], **parameters[1])
                    cls.setUp(self)
                def getParameters(self):
                    """
                    Return the parameters with which this test case was instantiated.
                    """
                    return parameters
                def getTestCaseIndex(self):
                    """
                    Return the index of the current test case according to the list of
                    parameters passed to parametrized.
                    """
                    return index
                def getFullParametersSequence(self):
                    """
                    Return the full normalized list of parameters passed to parametrized.
                    """
                    return copy.copy(parameters_seq)
                return setUp, getParameters, getTestCaseIndex, getFullParametersSequence
            (set_up, get_parameters,
             get_test_case_index,
             get_full_parameters_sequence) = closing_over()
            # Subclass the decorated class, overriding setUp and patching in
            # the accessor stubs declared on ParametrizedTestCase.
            new_class = type(name, (cls, ),
                             {'setUp': set_up,
                              'getParameters': get_parameters,
                              'getTestCaseIndex': get_test_case_index,
                              'getFullParametersSequence': get_full_parameters_sequence})
            generated_test_cases.append(new_class)
            setattr(module, name, new_class)
        return make_propagator(cls, generated_test_cases)
    return magic_module_set_test_case
| 40.396648 | 89 | 0.603098 |
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
import copy
import unittest
import collections
import importlib
__all__ = [
'parametrized',
'ParametrizedTestCase',
]
def _process_parameters(parameters_seq):
processed_parameters_seq = []
for parameters in parameters_seq:
if isinstance(parameters, collections.Mapping):
processed_parameters_seq.append((tuple(),
dict(parameters)))
elif (len(parameters) == 2
and isinstance(parameters[0], collections.Sequence)
and isinstance(parameters[1], collections.Mapping)):
processed_parameters_seq.append((tuple(parameters[0]),
dict(parameters[1])))
else:
processed_parameters_seq.append((tuple(parameters),
dict()))
return processed_parameters_seq
def _build_name(name, index):
return '%s_%d' % (name, index)
def strclass(cls):
    """Return the fully qualified ``module.ClassName`` string for *cls*."""
    return '.'.join((cls.__module__, cls.__name__))
class ParametrizedTestCase(unittest.TestCase):
    """Base class for parametrized test cases.

    Subclasses implement :meth:`setParameters`; the accessors below are
    stubs that ``parametrized`` patches onto each generated subclass.
    (The original defined ``getFullParametersSequence`` twice; the dead
    first definition has been removed.)
    """
    def setParameters(self, *args, **kwargs):
        """Receive one parameter set; must be overridden by subclasses."""
        raise NotImplementedError(
            ('setParameters must be implemented '
             'because it receives the parameters.'))
    def getParameters(self):
        """Return the parameters this test case was instantiated with."""
        raise NotImplementedError(
            'getParameters should have been patched by parametrized.')
    def getTestCaseIndex(self):
        """Return this test case's index in the parametrized sequence."""
        raise NotImplementedError(
            'getTestCaseIndex should have been patched by parametrized.')
    def getFullParametersSequence(self):
        """Return the full normalized parameter sequence."""
        raise NotImplementedError(
            'getFullParametersSequence should have been patched by parametrized.')
    def __str__(self):
        # Placeholder rendering when the accessors have not been patched.
        try:
            return "%s[%d](%s) (%s)" % (self._testMethodName,
                                        self.getTestCaseIndex(),
                                        self.getParameters(),
                                        strclass(self.__class__))
        except NotImplementedError:
            return "%s[...](...) (%s)" % (self._testMethodName,
                                          strclass(self.__class__))
    def __repr__(self):
        try:
            return "<%s[%d](%s) testMethod=%s>" % (strclass(self.__class__),
                                                   self.getTestCaseIndex(),
                                                   self.getParameters(),
                                                   self._testMethodName)
        except NotImplementedError:
            return "<%s[...](...) testMethod=%s>" % (strclass(self.__class__),
                                                     self._testMethodName)
class PropagateSetAttr(type):
    # Metaclass forwarding class-attribute assignments to observer classes.
    # super().__setattr__ is never called, so the attribute is NOT set on the
    # propagator class itself -- presumably intentional; confirm on review.
    def __new__(mcs, name, bases, dct):
        # Each class created with this metaclass gets its own observer list.
        dct['setattr_observers'] = []
        cls = super(PropagateSetAttr, mcs).__new__(mcs, name, bases, dct)
        return cls
    def __setattr__(cls, key, value):
        # Broadcast the assignment to every registered observer class.
        for observer in cls.setattr_observers:
            setattr(observer, key, value)
def make_propagator(cls, setattr_observers):
    # Dummy TestCase whose class-attribute writes are broadcast to the
    # generated parametrized subclasses.  NOTE(review): *cls* is accepted
    # but never used -- confirm whether it was meant to be a base class.
    SkippableTest = PropagateSetAttr('SkippableTest', (unittest.TestCase,),
                                     {})
    SkippableTest.setattr_observers.extend(setattr_observers)
    return SkippableTest
def parametrized(*parameters_seq):
    # Class-decorator factory: generate one TestCase subclass per parameter
    # spec, register each in the decorated class's module for discovery, and
    # return a propagator stand-in for the original class.
    parameters_seq = _process_parameters(parameters_seq)
    def magic_module_set_test_case(cls):
        if not hasattr(cls, 'setParameters'):
            raise TypeError('%s does not have a setParameters method.' % (
                cls.__name__, ))
        # Module where the generated classes will be installed.
        module = importlib.import_module(cls.__module__)
        generated_test_cases = []
        for index, parameters in enumerate(parameters_seq):
            name = _build_name(cls.__name__, index)
            # Default-argument trick binds the current (parameters, index)
            # pair so each generated class closes over its own values.
            def closing_over(parameters=parameters, index=index):
                def setUp(self):
                    # Inject parameters before the user's own setUp runs.
                    self.setParameters(*parameters[0], **parameters[1])
                    cls.setUp(self)
                def getParameters(self):
                    return parameters
                def getTestCaseIndex(self):
                    return index
                def getFullParametersSequence(self):
                    return copy.copy(parameters_seq)
                return setUp, getParameters, getTestCaseIndex, getFullParametersSequence
            (set_up, get_parameters,
             get_test_case_index,
             get_full_parameters_sequence) = closing_over()
            # Subclass the decorated class, patching in the accessors.
            new_class = type(name, (cls, ),
                             {'setUp': set_up,
                              'getParameters': get_parameters,
                              'getTestCaseIndex': get_test_case_index,
                              'getFullParametersSequence': get_full_parameters_sequence})
            generated_test_cases.append(new_class)
            setattr(module, name, new_class)
        return make_propagator(cls, generated_test_cases)
    return magic_module_set_test_case
| true | true |
f7142e78dcfc85a5990b30355dbe0eeb484752fd | 1,454 | py | Python | download_data.py | EugenHotaj/ray-automl | f516c06f8c24559edac120941cd36e8720ecd228 | [
"MIT"
] | null | null | null | download_data.py | EugenHotaj/ray-automl | f516c06f8c24559edac120941cd36e8720ecd228 | [
"MIT"
] | null | null | null | download_data.py | EugenHotaj/ray-automl | f516c06f8c24559edac120941cd36e8720ecd228 | [
"MIT"
] | null | null | null | """Script to download and cache all data."""
import os
from typing import List
import openml
from automl import openml_utils
BENCHMARK_TASKS = {"adult": 7592, "nomao": 9977, "phoneme": 9952}
FOLD_COL = "fold"
def download_openml_tasks(task_ids: List[int]):
    """Downloads the given task_ids from OpenML and dumps them as OpenMLTasks."""
    tasks = openml.tasks.get_tasks(
        task_ids, download_data=True, download_qualities=False
    )
    for task in tasks:
        dataset = task.get_dataset()
        df, _, categorical, columns = dataset.get_data()
        # The label is whatever OpenML declares as the default target.
        label_col = dataset.default_target_attribute
        feature_cols = [col for col in columns if col != label_col]
        # NOTE(review): `categorical` is zipped against `feature_cols`
        # (label removed) -- this assumes the indicators stay aligned, i.e.
        # that the label column is last. TODO confirm against the OpenML API.
        numerical_cols = [col for ind, col in zip(categorical, feature_cols) if not ind]
        categorical_cols = [col for ind, col in zip(categorical, feature_cols) if ind]
        # Initialize every row's fold to -1, then overwrite from the split.
        df[FOLD_COL] = -1
        splits = task.download_split().split[0]  # We assume one repetition.
        for split, idxs in splits.items():
            idxs = idxs[0].test
            df.loc[idxs, FOLD_COL] = split
        out_path = openml_utils.task_path(task.task_id)
        os.makedirs(os.path.dirname(out_path), exist_ok=True)
        # Rebinding `task` here is safe: the loop reassigns from `tasks`.
        task = openml_utils.OpenMLTask(
            df, feature_cols, numerical_cols, categorical_cols, label_col, FOLD_COL
        )
        task.dump(out_path)
if __name__ == "__main__":
    # CLI entry point: download and cache every benchmark task listed above.
    download_openml_tasks(list(BENCHMARK_TASKS.values()))
| 33.813953 | 88 | 0.672627 |
import os
from typing import List
import openml
from automl import openml_utils
BENCHMARK_TASKS = {"adult": 7592, "nomao": 9977, "phoneme": 9952}
FOLD_COL = "fold"
def download_openml_tasks(task_ids: List[int]):
    """Download the given task ids from OpenML and dump them as OpenMLTasks."""
    tasks = openml.tasks.get_tasks(
        task_ids, download_data=True, download_qualities=False
    )
    for task in tasks:
        dataset = task.get_dataset()
        df, _, categorical, columns = dataset.get_data()
        label_col = dataset.default_target_attribute
        feature_cols = [col for col in columns if col != label_col]
        # NOTE(review): zipping `categorical` against `feature_cols` assumes
        # indicator/column alignment with the label last -- TODO confirm.
        numerical_cols = [col for ind, col in zip(categorical, feature_cols) if not ind]
        categorical_cols = [col for ind, col in zip(categorical, feature_cols) if ind]
        df[FOLD_COL] = -1
        # Only the first repetition of the split is used.
        splits = task.download_split().split[0]
        for split, idxs in splits.items():
            idxs = idxs[0].test
            df.loc[idxs, FOLD_COL] = split
        out_path = openml_utils.task_path(task.task_id)
        os.makedirs(os.path.dirname(out_path), exist_ok=True)
        task = openml_utils.OpenMLTask(
            df, feature_cols, numerical_cols, categorical_cols, label_col, FOLD_COL
        )
        task.dump(out_path)
if __name__ == "__main__":
    # CLI entry point: download and cache every benchmark task listed above.
    download_openml_tasks(list(BENCHMARK_TASKS.values()))
| true | true |
f7142ea121c4efd6ef516ca222b10a3ea61550d2 | 3,746 | py | Python | data_loader.py | SmirnovKol/recurrent-visual-attention | 4cb8d9e768ae35f38439278bb8a7b4d6b253a537 | [
"MIT"
] | 463 | 2017-12-25T12:36:08.000Z | 2022-03-29T17:05:19.000Z | data_loader.py | Pandinosaurus/recurrent-visual-attention | a38ac8958ebf1c61a10c4d5320f1e31d3d0b73dd | [
"MIT"
] | 44 | 2018-01-16T08:41:36.000Z | 2021-12-17T06:23:13.000Z | data_loader.py | Pandinosaurus/recurrent-visual-attention | a38ac8958ebf1c61a10c4d5320f1e31d3d0b73dd | [
"MIT"
] | 135 | 2017-12-26T05:09:03.000Z | 2022-03-27T00:40:42.000Z | import numpy as np
from utils import plot_images
import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
def get_train_valid_loader(
    data_dir,
    batch_size,
    random_seed,
    valid_size=0.1,
    shuffle=True,
    show_sample=False,
    num_workers=4,
    pin_memory=False,
):
    """Train and validation data loaders.

    If using CUDA, num_workers should be set to 1 and pin_memory to True.

    Args:
        data_dir: path directory to the dataset.
        batch_size: how many samples per batch to load.
        random_seed: fix seed for reproducibility.
        valid_size: percentage split of the training set used for
            the validation set. Should be a float in the range [0, 1].
            In the paper, this number is set to 0.1.
        shuffle: whether to shuffle the train/validation indices.
        show_sample: plot 9x9 sample grid of the dataset.
        num_workers: number of subprocesses to use when loading the dataset.
        pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
            True if using GPU.

    Returns:
        ``(train_loader, valid_loader)`` over disjoint subsets of the MNIST
        training set.
    """
    error_msg = "[!] valid_size should be in the range [0, 1]."
    assert (valid_size >= 0) and (valid_size <= 1), error_msg

    # define transforms (standard MNIST mean/std)
    normalize = transforms.Normalize((0.1307,), (0.3081,))
    trans = transforms.Compose([transforms.ToTensor(), normalize])

    # load dataset
    dataset = datasets.MNIST(data_dir, train=True, download=True, transform=trans)

    num_train = len(dataset)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))

    if shuffle:
        np.random.seed(random_seed)
        np.random.shuffle(indices)

    # The first `split` (shuffled) indices form the validation set.
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)

    train_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=train_sampler,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    valid_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=valid_sampler,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )

    # visualize some images
    if show_sample:
        sample_loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=9,
            shuffle=shuffle,
            num_workers=num_workers,
            pin_memory=pin_memory,
        )
        data_iter = iter(sample_loader)
        # Bug fix: `data_iter.next()` is Python 2 only; the builtin next()
        # works with any Python 3 iterator.
        images, labels = next(data_iter)
        X = images.numpy()
        # Reorder NCHW -> NHWC for plotting.
        X = np.transpose(X, [0, 2, 3, 1])
        plot_images(X, labels)

    return (train_loader, valid_loader)
def get_test_loader(data_dir, batch_size, num_workers=4, pin_memory=False):
    """Build the MNIST test-set data loader.

    If using CUDA, num_workers should be set to 1 and pin_memory to True.

    Args:
        data_dir: path directory to the dataset.
        batch_size: how many samples per batch to load.
        num_workers: number of subprocesses to use when loading the dataset.
        pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
            True if using GPU.
    """
    # Same normalization statistics as the training loader.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    test_set = datasets.MNIST(data_dir, train=False, download=True,
                              transform=transform)
    return torch.utils.data.DataLoader(
        test_set,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
| 30.704918 | 83 | 0.664976 | import numpy as np
from utils import plot_images
import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
def get_train_valid_loader(
    data_dir,
    batch_size,
    random_seed,
    valid_size=0.1,
    shuffle=True,
    show_sample=False,
    num_workers=4,
    pin_memory=False,
):
    """Return ``(train_loader, valid_loader)`` over disjoint MNIST subsets.

    ``valid_size`` is the fraction of the training set held out for
    validation; ``random_seed`` fixes the shuffle for reproducibility.
    """
    error_msg = "[!] valid_size should be in the range [0, 1]."
    assert (valid_size >= 0) and (valid_size <= 1), error_msg
    # Standard MNIST normalization statistics.
    normalize = transforms.Normalize((0.1307,), (0.3081,))
    trans = transforms.Compose([transforms.ToTensor(), normalize])
    dataset = datasets.MNIST(data_dir, train=True, download=True, transform=trans)
    num_train = len(dataset)
    indices = list(range(num_train))
    split = int(np.floor(valid_size * num_train))
    if shuffle:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    # The first `split` (shuffled) indices form the validation set.
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)
    train_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=train_sampler,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    valid_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=valid_sampler,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    if show_sample:
        sample_loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=9,
            shuffle=shuffle,
            num_workers=num_workers,
            pin_memory=pin_memory,
        )
        data_iter = iter(sample_loader)
        # Bug fix: `data_iter.next()` is Python 2 only; use builtin next().
        images, labels = next(data_iter)
        X = images.numpy()
        # Reorder NCHW -> NHWC for plotting.
        X = np.transpose(X, [0, 2, 3, 1])
        plot_images(X, labels)
    return (train_loader, valid_loader)
def get_test_loader(data_dir, batch_size, num_workers=4, pin_memory=False):
    """Return a non-shuffled DataLoader over the MNIST test split."""
    # Same normalization statistics as the training loader.
    normalize = transforms.Normalize((0.1307,), (0.3081,))
    trans = transforms.Compose([transforms.ToTensor(), normalize])
    dataset = datasets.MNIST(data_dir, train=False, download=True, transform=trans)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    return data_loader
| true | true |
f7142ff349e7ada53e51a9c796f37baacff04ec9 | 1,290 | py | Python | cl_progress/cl_progress.py | CORDEA/myPythonModules | 790674a8f155a94804242b9b220eb6ac6efc8328 | [
"Apache-2.0"
] | null | null | null | cl_progress/cl_progress.py | CORDEA/myPythonModules | 790674a8f155a94804242b9b220eb6ac6efc8328 | [
"Apache-2.0"
] | null | null | null | cl_progress/cl_progress.py | CORDEA/myPythonModules | 790674a8f155a94804242b9b220eb6ac6efc8328 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding:utf-8
#
# Copyright 2015-2017 Yoshihiro Tanaka
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__Author__ = "Yoshihiro Tanaka"
__date__ = "2015-02-02"
def progress(sent, flag):
    """Print *sent* padded to the terminal width, followed by a colored
    [SUCCEED] (green) or [FAILED] (red) status flush with the right edge."""
    import sys
    _SUC = '[SUCCEED]'
    _FAL = '[FAILED]'
    # ref. http://d.hatena.ne.jp/heavenshell/20090909/1252509749
    colors = {'clear': '\033[0m', 'red': '\033[31m', 'green': '\033[32m'}
    # Bug fix: the `commands` module is Python 2 only (removed in Python 3),
    # and `stty size` fails when stdout is not a tty.  Prefer
    # shutil.get_terminal_size (3.3+, with built-in 80x24 fallback) and keep
    # the old path for Python 2.
    try:
        from shutil import get_terminal_size
        width = get_terminal_size().columns
    except ImportError:  # pragma: no cover - Python 2 fallback
        import commands
        width = int(commands.getoutput('stty size').split()[1])
    if flag:
        result = _SUC
        color = 'green'
    else:
        result = _FAL
        color = 'red'
    # Negative padding collapses to '' via the multiplication below.
    spaces = width - (len(sent) + len(result))
    sys.stdout.write('%s%s' % (colors['clear'], sent + (' ' * spaces)))
    sys.stdout.write('%s%s%s\n' % (colors[color], result, colors['clear']))
| 30 | 75 | 0.66124 |
__Author__ = "Yoshihiro Tanaka"
__date__ = "2015-02-02"
def progress(sent, flag):
    """Print *sent* padded to the terminal width, then a colored
    [SUCCEED]/[FAILED] status aligned to the right edge."""
    # NOTE(review): `commands` exists only in Python 2 (removed in 3.x), so
    # this function is Python-2 only as written; `stty size` also requires a
    # real tty -- confirm before reuse.
    import sys, commands
    _SUC = '[SUCCEED]'
    _FAL = '[FAILED]'
    colors = {'clear': '\033[0m', 'red': '\033[31m', 'green': '\033[32m'}
    width = int(commands.getoutput('stty size').split()[1])
    if flag:
        result = _SUC
        color = 'green'
    else:
        result = _FAL
        color = 'red'
    # Negative padding collapses to '' via the multiplication below.
    spaces = width - (len(sent) + len(result))
    sys.stdout.write('%s%s' % (colors['clear'], sent + (' ' * spaces)))
    sys.stdout.write('%s%s%s\n' % (colors[color], result, colors['clear']))
| true | true |
f71430b176a3802c19f4d2638a14ba0259909022 | 863 | py | Python | src/utils/osrm.py | sashakh/vroom-scripts | 46b8abce2d8680f5f854965cccf57ac7856fe092 | [
"BSD-2-Clause"
] | null | null | null | src/utils/osrm.py | sashakh/vroom-scripts | 46b8abce2d8680f5f854965cccf57ac7856fe092 | [
"BSD-2-Clause"
] | null | null | null | src/utils/osrm.py | sashakh/vroom-scripts | 46b8abce2d8680f5f854965cccf57ac7856fe092 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
DEFAULT_IP = '0.0.0.0'
DEFAULT_PORT = '5000'
def format_request(service,
                   locs,
                   ip = DEFAULT_IP,
                   port = DEFAULT_PORT):
    """Build an OSRM HTTP request URL for *service* over the given
    ``(lon, lat)`` coordinate pairs."""
    base = 'http://' + ip + ':' + port + '/' + service + '/v1/car/'
    coords = ';'.join(str(loc[0]) + ',' + str(loc[1]) for loc in locs)
    if not coords:
        # Preserve historic behaviour: with no locations the trailing
        # slash is stripped from the base URL.
        return base[:-1]
    return base + coords
def route(locs,
          extra_args = '',
          ip = DEFAULT_IP,
          port = DEFAULT_PORT):
    """Query the OSRM ``route`` service and return the decoded JSON.

    *extra_args* is appended verbatim to the query string, so it must start
    with '&' when non-empty.
    """
    # Building request.
    req = format_request('route', locs, ip, port)
    req += '?alternatives=false&steps=false&overview=full&continue_straight=false'
    req += extra_args
    return requests.get(req).json()
def table(locs,
          ip = DEFAULT_IP,
          port = DEFAULT_PORT):
    """Query the OSRM ``table`` (duration matrix) service; return the JSON."""
    req = format_request('table', locs, ip, port)
    return requests.get(req).json()
| 23.324324 | 80 | 0.559676 |
import requests
DEFAULT_IP = '0.0.0.0'
DEFAULT_PORT = '5000'
def format_request(service,
                   locs,
                   ip = DEFAULT_IP,
                   port = DEFAULT_PORT):
    """Build an OSRM request URL for *service* over ``(lon, lat)`` pairs."""
    req = 'http://' + ip + ':' + port + '/'
    req += service + '/v1/car/'
    for loc in locs:
        req += str(loc[0]) + ',' + str(loc[1]) + ';'
    # Drop the trailing ';' (or the trailing '/' when locs is empty).
    return req[:-1]
def route(locs,
          extra_args = '',
          ip = DEFAULT_IP,
          port = DEFAULT_PORT):
    """Query the OSRM ``route`` service; *extra_args* is appended verbatim
    to the query string (must start with '&' when non-empty)."""
    req = format_request('route', locs, ip, port)
    req += '?alternatives=false&steps=false&overview=full&continue_straight=false'
    req += extra_args
    return requests.get(req).json()
def table(locs,
          ip = DEFAULT_IP,
          port = DEFAULT_PORT):
    """Query the OSRM ``table`` (duration matrix) service; return the JSON."""
    req = format_request('table', locs, ip, port)
    return requests.get(req).json()
| true | true |
f7143197fc3c82b21a8db9b00f7324492cb578fa | 1,210 | py | Python | src/prometheus_async/__init__.py | hynek/prometheus_async | 4abb25ac4f893c951131123989013df1286338d0 | [
"Apache-2.0"
] | 49 | 2015-10-03T00:04:12.000Z | 2019-05-13T10:32:02.000Z | src/prometheus_async/__init__.py | hynek/prometheus_async | 4abb25ac4f893c951131123989013df1286338d0 | [
"Apache-2.0"
] | 13 | 2015-10-07T21:15:23.000Z | 2019-02-09T17:12:46.000Z | src/prometheus_async/__init__.py | hynek/prometheus_async | 4abb25ac4f893c951131123989013df1286338d0 | [
"Apache-2.0"
] | 12 | 2015-10-15T23:05:03.000Z | 2019-02-09T15:49:07.000Z | # SPDX-License-Identifier: Apache-2.0
#
# Copyright 2016 Hynek Schlawack
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Async helpers for prometheus_client.
"""
# Package metadata consumed by packaging tooling and the docs build.
__version__ = "22.3.0.dev0"
__title__ = "prometheus_async"
# __doc__ is None in when running with -OO / PYTHONOPTIMIZE=2.
__description__ = (__doc__ or "").strip()
__uri__ = "https://prometheus-async.readthedocs.io/"
__author__ = "Hynek Schlawack"
__email__ = "hs@ox.cx"
__license__ = "Apache License, Version 2.0"
__copyright__ = f"Copyright (c) 2016 {__author__}"
# The asyncio helpers are always available.
from . import aio
__all__ = ["aio"]
# `tx` (presumably the Twisted integration -- confirm) is optional: expose
# it only when its dependencies are importable.
try:
    from . import tx  # noqa -- flake8 doesn't understand __all__.append
    __all__.append("tx")
except ImportError:
    pass
| 25.744681 | 74 | 0.733058 |
# Package metadata consumed by packaging tooling and the docs build.
__version__ = "22.3.0.dev0"
__title__ = "prometheus_async"
# __doc__ may be None under PYTHONOPTIMIZE=2, hence the `or ""` guard.
__description__ = (__doc__ or "").strip()
__uri__ = "https://prometheus-async.readthedocs.io/"
__author__ = "Hynek Schlawack"
__email__ = "hs@ox.cx"
__license__ = "Apache License, Version 2.0"
__copyright__ = f"Copyright (c) 2016 {__author__}"
from . import aio
__all__ = ["aio"]
# `tx` (presumably the Twisted integration -- confirm) is optional.
try:
    from . import tx
    __all__.append("tx")
except ImportError:
    pass
| true | true |
f71431a16aaaf2c0f14e8c3eceaefa14bf68a0e5 | 5,134 | py | Python | scrolls/errors.py | a-bison/scrolls-py | cd531bd0755a107e79afc5bd8a23f0905e1fc120 | [
"BSD-3-Clause"
] | null | null | null | scrolls/errors.py | a-bison/scrolls-py | cd531bd0755a107e79afc5bd8a23f0905e1fc120 | [
"BSD-3-Clause"
] | null | null | null | scrolls/errors.py | a-bison/scrolls-py | cd531bd0755a107e79afc5bd8a23f0905e1fc120 | [
"BSD-3-Clause"
] | null | null | null | """
Errors not dependent on any specific Scrolls types.
Typically, you won't need to instantiate any of these yourself. The base exception
for _all_ Scrolls errors is `ScrollError`. Any error that occurs while validating
script syntax or interpreting scripts will inherit from `PositionalError`.
"""
import functools
import math
import typing as t
__all__ = (
"format_positional_error",
"ScrollError",
"PositionalError",
"ParseError",
"ParseEofError",
"ParseExpectError",
"TokenizeError",
"TokenizeEofError"
)
@functools.lru_cache(128)
def format_positional_error(
    line: int,
    pos: int,
    string: str,
    message: str,
    prior_lines: int = 3
) -> str:
    """Format a positional error generated by Scrolls.

    Args:
        line: The line the error was generated on.
        pos: The character the error was generated on.
        string: The script that generated the error.
        message: The message associated with the error.
        prior_lines: The number of lines that should be printed before the line
            the error occurred on. The line containing the error will always
            be printed.

    Returns:
        The formatted error message.

        For example:
        ```text
        ...
        1 print "World"
        2 print "Foo"
        3 print "Bar"
        4 print "bad string
          ^
        line 4: Unexpected EOF while parsing string literal.
        ```

        If there are more than `prior_lines` lines before the error, `...` will be
        prepended to the output.
    """
    raw_lines = string.splitlines()
    # Width of the line-number gutter: enough digits for the largest
    # zero-based line number.  (The original derived this from len(string),
    # the *character* count, which over-padded long scripts and raised
    # ValueError via math.log10(0) on an empty string.)
    zfill = len(str(max(len(raw_lines) - 1, 1)))
    lines = [f"{n:0{zfill}} {l}" for n, l in enumerate(raw_lines)]
    printed_lines = lines[max(0, line - prior_lines): line + 1]
    output_lines = [
        *(["..."] if line - prior_lines >= 1 else []),
        *printed_lines,
        # Gutter is zfill digits plus one space, hence the extra offset.
        " "*(pos + 1 + zfill) + "^",
        f"line {line}: {message}"
    ]
    return "\n".join(output_lines)
class ScrollError(Exception):
    """Base class for all Scrolls-related errors.

    Catch this to handle any error raised by the Scrolls package.
    """
    pass
class PositionalError(ScrollError):
    """An error tied to a specific location in a script.

    All tokenizing, parsing, and interpreting errors inherit from this.
    Instances are normally caught rather than created by hand; calling
    ``str()`` on one yields a formatted message pointing at the offending
    location (see `format_positional_error`).  The attributes below are
    available for custom formatting.
    """

    def __init__(
        self,
        line: int,
        pos: int,
        string: str,
        message: str
    ):
        self.line = line        # line the error occurred on
        self.pos = pos          # character offset within that line
        self.string = string    # the script text that triggered the error
        self.message = message  # human-readable description

    def __str__(self) -> str:
        """Render a formatted message pointing at the error location."""
        return format_positional_error(self.line, self.pos,
                                       self.string, self.message)
# Tokenizer-stage errors.
class TokenizeError(PositionalError):
    """Generic error raised while lexing/tokenizing a script."""
    pass
class TokenizeEofError(TokenizeError):
    """Raised when the lexer/tokenizer hits an unexpected EOF (end of script)."""
    pass
class ParseError(PositionalError):
    """Generic error raised during the parsing stage."""
    def __init__(
        self,
        line: int,
        pos: int,
        string: str,
        message: str
    ):
        """Initialize with the error location and message; see PositionalError."""
        super().__init__(
            line,
            pos,
            string,
            message
        )
        # IMPLEMENTATION DETAIL
        # Sets whether this parse error is fatal or not. Defaults to `False`.
        # If `True`, a `ParseError` will cause all parsing to stop immediately and
        # raise the error. If `fatal` is `False`, a parse function may try alternative
        # parsing. Internally, `fatal = False` is used by `parse_choice` to determine
        # which parsing function to choose. See `scrolls.ast` for more details.
        self.fatal = False
# Specialized parse errors; both are non-fatal by default (see ParseError).
class ParseEofError(ParseError):
    """Raised when an EOF is encountered too early while parsing a script."""
    pass
class ParseExpectError(ParseError):
    """Raised when an unexpected token is encountered during parsing."""
    pass
| 28.681564 | 90 | 0.599533 |
import functools
import math
import typing as t
__all__ = (
"format_positional_error",
"ScrollError",
"PositionalError",
"ParseError",
"ParseEofError",
"ParseExpectError",
"TokenizeError",
"TokenizeEofError"
)
@functools.lru_cache(128)
def format_positional_error(
    line: int,
    pos: int,
    string: str,
    message: str,
    prior_lines: int = 3
) -> str:
    """Format a positional error: up to *prior_lines* context lines, a caret
    under column *pos* of line *line*, and the message, all numbered."""
    raw_lines = string.splitlines()
    # Gutter width: digits of the largest zero-based line number.  (The
    # original used len(string) -- the character count -- which over-padded
    # long scripts and raised via math.log10(0) on an empty string.)
    zfill = len(str(max(len(raw_lines) - 1, 1)))
    lines = [f"{n:0{zfill}} {l}" for n, l in enumerate(raw_lines)]
    printed_lines = lines[max(0, line - prior_lines): line + 1]
    output_lines = [
        *(["..."] if line - prior_lines >= 1 else []),
        *printed_lines,
        # Gutter is zfill digits plus one space, hence the extra offset.
        " "*(pos + 1 + zfill) + "^",
        f"line {line}: {message}"
    ]
    return "\n".join(output_lines)
class ScrollError(Exception):
    """Base class for all Scrolls-related errors."""
    pass
class PositionalError(ScrollError):
    """An error tied to a specific location in a script; str() renders a
    formatted message pointing at that location."""
    def __init__(
        self,
        line: int,
        pos: int,
        string: str,
        message: str
    ):
        self.line = line        # line the error occurred on
        self.pos = pos          # character offset within that line
        self.string = string    # the script text that triggered the error
        self.message = message  # human-readable description
    def __str__(self) -> str:
        """Render the formatted, caret-annotated error message."""
        return format_positional_error(
            self.line,
            self.pos,
            self.string,
            self.message
        )
class TokenizeError(PositionalError):
    """Generic error raised while lexing/tokenizing a script."""
    pass
class TokenizeEofError(TokenizeError):
    """Raised on an unexpected EOF during tokenizing."""
    pass
class ParseError(PositionalError):
    """Generic error raised during the parsing stage."""
    def __init__(
        self,
        line: int,
        pos: int,
        string: str,
        message: str
    ):
        super().__init__(
            line,
            pos,
            string,
            message
        )
        # Non-fatal by default: a parse function may try alternatives when
        # `fatal` is False; True aborts parsing immediately.
        self.fatal = False
class ParseEofError(ParseError):
    """Raised when an EOF is encountered too early while parsing."""
    pass
class ParseExpectError(ParseError):
    """Raised when an unexpected token is encountered during parsing."""
    pass
| true | true |
f71431e0bae919d25b50e4bc0811e7098763a471 | 173 | py | Python | virtual/lib/python3.6/site-packages/pylint/test/functional/broad_except.py | drewheathens/The-Moringa-Tribune | 98ee4d63c9df6f1f7497fc6876960a822d914500 | [
"MIT"
] | 69 | 2019-02-18T12:07:35.000Z | 2022-03-12T10:38:32.000Z | virtual/lib/python3.6/site-packages/pylint/test/functional/broad_except.py | drewheathens/The-Moringa-Tribune | 98ee4d63c9df6f1f7497fc6876960a822d914500 | [
"MIT"
] | 32 | 2018-05-01T05:24:43.000Z | 2022-03-11T23:20:39.000Z | virtual/lib/python3.6/site-packages/pylint/test/functional/broad_except.py | drewheathens/The-Moringa-Tribune | 98ee4d63c9df6f1f7497fc6876960a822d914500 | [
"MIT"
] | 28 | 2019-03-22T01:07:13.000Z | 2022-02-21T16:38:27.000Z | # pylint: disable=missing-docstring
from __future__ import print_function
__revision__ = 0  # dummy state; this file is a pylint functional-test fixture
try:
    __revision__ += 1  # statement exists only to populate the try body
except Exception:  # [broad-except]
    print('error')  # line numbers matter to the fixture; do not add lines
| 19.222222 | 37 | 0.739884 |
from __future__ import print_function
__revision__ = 0  # dummy state for the pylint broad-except fixture
try:
    __revision__ += 1  # statement exists only to populate the try body
except Exception:  # deliberately broad: this is what the fixture exercises
    print('error')
| true | true |
f71431e15f97613abc12e56b17caf9d892de3bd9 | 1,359 | py | Python | setup.py | butla/bravado-falcon | 2c377db486150a6e0b93a4fb5970be9cf3e769d0 | [
"MIT"
] | 2 | 2017-01-16T07:51:35.000Z | 2020-02-17T21:44:13.000Z | setup.py | butla/bravado-falcon | 2c377db486150a6e0b93a4fb5970be9cf3e769d0 | [
"MIT"
] | null | null | null | setup.py | butla/bravado-falcon | 2c377db486150a6e0b93a4fb5970be9cf3e769d0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os.path
from setuptools import setup
project_name = 'bravado-falcon'
version = '0.1.0'
# Resolve sibling files relative to this setup.py, not the CWD.
setup_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(setup_dir, 'requirements.txt')) as req_file:
    # Strip '==' version pins: install_requires gets bare package names.
    requirements = [lib.split('==')[0] for lib in req_file.readlines()]
with open(os.path.join(setup_dir, 'README.rst')) as readme_file:
    readme = readme_file.read()
setup(
    name=project_name,
    version=version,
    description='Integration of Falcon API unit tests with Bravado.',
    long_description=readme,
    author='Michał Bultrowicz',
    author_email='michal.bultrowicz@gmail.com',
    url='https://github.com/butla/bravado-falcon',
    packages=[
        # Package directory uses underscores, project name uses dashes.
        project_name.replace('-', '_'),
    ],
    package_dir={project_name: project_name},
    include_package_data=True,
    install_requires=requirements,
    license="MIT",
    keywords='falcon bravado test',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
    ],
)
| 32.357143 | 71 | 0.65195 |
import os.path
from setuptools import setup
project_name = 'bravado-falcon'
version = '0.1.0'
# Resolve sibling files relative to this setup.py, not the CWD.
setup_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(setup_dir, 'requirements.txt')) as req_file:
    # Strip '==' version pins: install_requires gets bare package names.
    requirements = [lib.split('==')[0] for lib in req_file.readlines()]
with open(os.path.join(setup_dir, 'README.rst')) as readme_file:
    readme = readme_file.read()
setup(
    name=project_name,
    version=version,
    description='Integration of Falcon API unit tests with Bravado.',
    long_description=readme,
    author='Michał Bultrowicz',
    author_email='michal.bultrowicz@gmail.com',
    url='https://github.com/butla/bravado-falcon',
    packages=[
        # Package directory uses underscores, project name uses dashes.
        project_name.replace('-', '_'),
    ],
    package_dir={project_name: project_name},
    include_package_data=True,
    install_requires=requirements,
    license="MIT",
    keywords='falcon bravado test',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
    ],
)
| true | true |
f714331b5f57e69f93e8004c75487a73e41833cf | 1,224 | py | Python | config/urls.py | kdagley/publicrelations | dbf424c247028ed93881a5375b22d196cfeed175 | [
"BSD-3-Clause"
] | null | null | null | config/urls.py | kdagley/publicrelations | dbf424c247028ed93881a5375b22d196cfeed175 | [
"BSD-3-Clause"
] | null | null | null | config/urls.py | kdagley/publicrelations | dbf424c247028ed93881a5375b22d196cfeed175 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
# Root URLconf: static template pages, the admin, user management and
# django-allauth account routes.  Media files are appended via static(),
# which only serves them when DEBUG is on (development convenience).
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    # Django Admin
    url(r'^admin/', include(admin.site.urls)),
    # User management
    url(r'^users/', include("pr.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # NOTE(review): string-based view references below were removed in
    # Django >= 1.10; replace with imported view callables when upgrading.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
| 34.971429 | 91 | 0.693627 |
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
url(r'^admin/', include(admin.site.urls)),
url(r'^users/', include("pr.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
| true | true |
f714342388aea63bff603443250cc030b85ccfb7 | 7,152 | py | Python | specklepy/api/resources/branch.py | jsdbroughton/specklepy | 81a98ea938106001abae308e3cfe04a2c588f06a | [
"Apache-2.0"
] | null | null | null | specklepy/api/resources/branch.py | jsdbroughton/specklepy | 81a98ea938106001abae308e3cfe04a2c588f06a | [
"Apache-2.0"
] | null | null | null | specklepy/api/resources/branch.py | jsdbroughton/specklepy | 81a98ea938106001abae308e3cfe04a2c588f06a | [
"Apache-2.0"
] | null | null | null | from gql import gql
from specklepy.api.resource import ResourceBase
from specklepy.api.models import Branch
from specklepy.logging import metrics
NAME = "branch"
METHODS = ["create"]
class Resource(ResourceBase):
    """API Access class for branches"""

    def __init__(self, account, basepath, client) -> None:
        super().__init__(
            account=account,
            basepath=basepath,
            client=client,
            name=NAME,
            methods=METHODS,
        )
        # Server responses are deserialised into Branch model instances.
        self.schema = Branch

    def create(
        self, stream_id: str, name: str, description: str = "No description provided"
    ) -> str:
        """Create a new branch on this stream

        Arguments:
            stream_id {str} -- the id of the stream to create the branch on
            name {str} -- the name of the new branch
            description {str} -- a short description of the branch

        Returns:
            id {str} -- the newly created branch's id
        """
        metrics.track(metrics.BRANCH, self.account, {"name": "create"})
        query = gql(
            """
            mutation BranchCreate($branch: BranchCreateInput!) {
              branchCreate(branch: $branch)
            }
            """
        )
        params = {
            "branch": {
                "streamId": stream_id,
                "name": name,
                "description": description,
            }
        }
        # parse_response=False: the mutation returns a plain id string,
        # not a Branch payload.
        return self.make_request(
            query=query, params=params, return_type="branchCreate", parse_response=False
        )

    def get(self, stream_id: str, name: str, commits_limit: int = 10):
        """Get a branch by name from a stream

        Arguments:
            stream_id {str} -- the id of the stream to get the branch from
            name {str} -- the name of the branch to get
            commits_limit {int} -- maximum number of commits to get

        Returns:
            Branch -- the fetched branch with its latest commits
        """
        metrics.track(metrics.BRANCH, self.account, {"name": "get"})
        query = gql(
            """
            query BranchGet($stream_id: String!, $name: String!, $commits_limit: Int!) {
              stream(id: $stream_id) {
                branch(name: $name) {
                  id,
                  name,
                  description,
                  commits (limit: $commits_limit) {
                    totalCount,
                    cursor,
                    items {
                      id,
                      referencedObject,
                      sourceApplication,
                      totalChildrenCount,
                      message,
                      authorName,
                      authorId,
                      branchName,
                      parents,
                      createdAt
                    }
                  }
                }
              }
            }
            """
        )
        params = {"stream_id": stream_id, "name": name, "commits_limit": commits_limit}
        return self.make_request(
            query=query, params=params, return_type=["stream", "branch"]
        )

    def list(self, stream_id: str, branches_limit: int = 10, commits_limit: int = 10):
        """Get a list of branches from a given stream

        Arguments:
            stream_id {str} -- the id of the stream to get the branches from
            branches_limit {int} -- maximum number of branches to get
            commits_limit {int} -- maximum number of commits to get per branch

        Returns:
            List[Branch] -- the branches on the stream
        """
        # Fix: previously reported {"name": "get"}, conflating list() and
        # get() in usage metrics.
        metrics.track(metrics.BRANCH, self.account, {"name": "list"})
        query = gql(
            """
            query BranchesGet($stream_id: String!, $branches_limit: Int!, $commits_limit: Int!) {
              stream(id: $stream_id) {
                branches(limit: $branches_limit) {
                  items {
                    id
                    name
                    description
                    commits(limit: $commits_limit) {
                      totalCount
                      items {
                        id
                        message
                        referencedObject
                        sourceApplication
                        parents
                        authorId
                        authorName
                        branchName
                        createdAt
                      }
                    }
                  }
                }
              }
            }
            """
        )
        params = {
            "stream_id": stream_id,
            "branches_limit": branches_limit,
            "commits_limit": commits_limit,
        }
        return self.make_request(
            query=query, params=params, return_type=["stream", "branches", "items"]
        )

    def update(
        self, stream_id: str, branch_id: str, name: str = None, description: str = None
    ):
        """Update a branch

        Arguments:
            stream_id {str} -- the id of the stream containing the branch to update
            branch_id {str} -- the id of the branch to update
            name {str} -- optional: the updated branch name
            description {str} -- optional: the updated branch description

        Returns:
            bool -- True if update is successful
        """
        metrics.track(metrics.BRANCH, self.account, {"name": "update"})
        query = gql(
            """
            mutation BranchUpdate($branch: BranchUpdateInput!) {
              branchUpdate(branch: $branch)
            }
            """
        )
        params = {
            "branch": {
                "streamId": stream_id,
                "id": branch_id,
            }
        }
        # Only send the fields the caller actually wants to change.
        if name:
            params["branch"]["name"] = name
        if description:
            params["branch"]["description"] = description
        return self.make_request(
            query=query, params=params, return_type="branchUpdate", parse_response=False
        )

    def delete(self, stream_id: str, branch_id: str):
        """Delete a branch

        Arguments:
            stream_id {str} -- the id of the stream containing the branch to delete
            branch_id {str} -- the branch to delete

        Returns:
            bool -- True if deletion is successful
        """
        metrics.track(metrics.BRANCH, self.account, {"name": "delete"})
        query = gql(
            """
            mutation BranchDelete($branch: BranchDeleteInput!) {
              branchDelete(branch: $branch)
            }
            """
        )
        params = {"branch": {"streamId": stream_id, "id": branch_id}}
        return self.make_request(
            query=query, params=params, return_type="branchDelete", parse_response=False
        )
| 32.958525 | 97 | 0.457634 | from gql import gql
from specklepy.api.resource import ResourceBase
from specklepy.api.models import Branch
from specklepy.logging import metrics
NAME = "branch"
METHODS = ["create"]
class Resource(ResourceBase):
def __init__(self, account, basepath, client) -> None:
super().__init__(
account=account,
basepath=basepath,
client=client,
name=NAME,
methods=METHODS,
)
self.schema = Branch
def create(
self, stream_id: str, name: str, description: str = "No description provided"
) -> str:
metrics.track(metrics.BRANCH, self.account, {"name": "create"})
query = gql(
"""
mutation BranchCreate($branch: BranchCreateInput!) {
branchCreate(branch: $branch)
}
"""
)
params = {
"branch": {
"streamId": stream_id,
"name": name,
"description": description,
}
}
return self.make_request(
query=query, params=params, return_type="branchCreate", parse_response=False
)
def get(self, stream_id: str, name: str, commits_limit: int = 10):
metrics.track(metrics.BRANCH, self.account, {"name": "get"})
query = gql(
"""
query BranchGet($stream_id: String!, $name: String!, $commits_limit: Int!) {
stream(id: $stream_id) {
branch(name: $name) {
id,
name,
description,
commits (limit: $commits_limit) {
totalCount,
cursor,
items {
id,
referencedObject,
sourceApplication,
totalChildrenCount,
message,
authorName,
authorId,
branchName,
parents,
createdAt
}
}
}
}
}
"""
)
params = {"stream_id": stream_id, "name": name, "commits_limit": commits_limit}
return self.make_request(
query=query, params=params, return_type=["stream", "branch"]
)
def list(self, stream_id: str, branches_limit: int = 10, commits_limit: int = 10):
metrics.track(metrics.BRANCH, self.account, {"name": "get"})
query = gql(
"""
query BranchesGet($stream_id: String!, $branches_limit: Int!, $commits_limit: Int!) {
stream(id: $stream_id) {
branches(limit: $branches_limit) {
items {
id
name
description
commits(limit: $commits_limit) {
totalCount
items{
id
message
referencedObject
sourceApplication
parents
authorId
authorName
branchName
createdAt
}
}
}
}
}
}
"""
)
params = {
"stream_id": stream_id,
"branches_limit": branches_limit,
"commits_limit": commits_limit,
}
return self.make_request(
query=query, params=params, return_type=["stream", "branches", "items"]
)
def update(
self, stream_id: str, branch_id: str, name: str = None, description: str = None
):
metrics.track(metrics.BRANCH, self.account, {"name": "update"})
query = gql(
"""
mutation BranchUpdate($branch: BranchUpdateInput!) {
branchUpdate(branch: $branch)
}
"""
)
params = {
"branch": {
"streamId": stream_id,
"id": branch_id,
}
}
if name:
params["branch"]["name"] = name
if description:
params["branch"]["description"] = description
return self.make_request(
query=query, params=params, return_type="branchUpdate", parse_response=False
)
def delete(self, stream_id: str, branch_id: str):
metrics.track(metrics.BRANCH, self.account, {"name": "delete"})
query = gql(
"""
mutation BranchDelete($branch: BranchDeleteInput!) {
branchDelete(branch: $branch)
}
"""
)
params = {"branch": {"streamId": stream_id, "id": branch_id}}
return self.make_request(
query=query, params=params, return_type="branchDelete", parse_response=False
)
| true | true |
f71434b3c8211cc2ab644b5205326ec0c652e164 | 5,009 | py | Python | cnn/model_search.py | badrutdinovrr/darts | 434708e63cbda8f710d3c1810d06ad31c11db923 | [
"Apache-2.0"
] | null | null | null | cnn/model_search.py | badrutdinovrr/darts | 434708e63cbda8f710d3c1810d06ad31c11db923 | [
"Apache-2.0"
] | null | null | null | cnn/model_search.py | badrutdinovrr/darts | 434708e63cbda8f710d3c1810d06ad31c11db923 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import *
from torch.autograd import Variable
from genotypes import PRIMITIVES
from genotypes import Genotype
class MixedOp(nn.Module):
  """Continuous relaxation of one edge: a softmax-weighted mixture of all
  candidate operations from PRIMITIVES."""

  def __init__(self, C, stride):
    super(MixedOp, self).__init__()
    candidates = []
    for name in PRIMITIVES:
      candidate = OPS[name](C, stride, False)
      if 'pool' in name:
        # Pooling candidates get a trailing (non-affine) batch-norm so their
        # output statistics are comparable with the conv candidates.
        candidate = nn.Sequential(candidate, nn.BatchNorm2d(C, affine=False))
      candidates.append(candidate)
    # Registration order matches PRIMITIVES, which the alpha rows index.
    self._ops = nn.ModuleList(candidates)

  def forward(self, x, weights):
    """Return sum_k weights[k] * op_k(x)."""
    mixed = 0
    for alpha, op in zip(weights, self._ops):
      mixed = mixed + alpha * op(x)
    return mixed
class Cell(nn.Module):
  """One DARTS search cell: a DAG of `steps` intermediate nodes whose
  edges are MixedOps over all candidate primitives.

  The cell takes the outputs of the two previous cells (s0, s1) as inputs
  and concatenates its last `multiplier` intermediate nodes as output.
  """

  def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):
    super(Cell, self).__init__()
    self.reduction = reduction
    # If the previous cell reduced spatial size, s0 comes from two cells
    # back at the larger resolution and must be downsampled to match s1.
    if reduction_prev:
      self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
    else:
      self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
    self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
    self._steps = steps
    self._multiplier = multiplier
    self._ops = nn.ModuleList()
    self._bns = nn.ModuleList()  # NOTE(review): appears unused; confirm before removing
    # Node i receives edges from the two cell inputs and every earlier
    # node, i.e. 2 + i incoming MixedOps, stored flat in self._ops in
    # node-major order (the same order `weights` rows use in forward()).
    for i in range(self._steps):
      for j in range(2+i):
        # In a reduction cell only the edges coming from the two cell
        # inputs (j < 2) use stride 2.
        stride = 2 if reduction and j < 2 else 1
        op = MixedOp(C, stride)
        self._ops.append(op)

  def forward(self, s0, s1, weights):
    """Evaluate the cell.

    `weights` holds one softmaxed row of architecture parameters per edge,
    in the same flat order as self._ops.
    """
    s0 = self.preprocess0(s0)
    s1 = self.preprocess1(s1)
    states = [s0, s1]
    offset = 0
    for i in range(self._steps):
      # Node value = sum over all incoming edges of the weighted mixed op.
      s = sum(self._ops[offset+j](h, weights[offset+j]) for j, h in enumerate(states))
      offset += len(states)
      states.append(s)
    # Concatenate the last `multiplier` intermediate nodes channel-wise.
    return torch.cat(states[-self._multiplier:], dim=1)
class Network(nn.Module):
  """DARTS over-parameterised search network: a stack of `layers` Cells
  with shared architecture parameters (alphas) for normal and reduction
  cells, followed by global pooling and a linear classifier."""

  def __init__(self, C, num_classes, layers, criterion, steps=4, multiplier=4, stem_multiplier=3):
    super(Network, self).__init__()
    self._C = C
    self._num_classes = num_classes
    self._layers = layers
    self._criterion = criterion
    self._steps = steps
    self._multiplier = multiplier
    C_curr = stem_multiplier*C
    # Stem takes single-channel input (in_channels=1, e.g. grayscale).
    # NOTE(review): upstream DARTS uses 3 input channels (CIFAR); confirm
    # this change is intentional for the target dataset.
    self.stem = nn.Sequential(
      nn.Conv2d(1, C_curr, 3, padding=1, bias=False),
      nn.BatchNorm2d(C_curr)
    )
    C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
    self.cells = nn.ModuleList()
    reduction_prev = False
    for i in range(layers):
      # Reduction cells (stride-2 input edges, doubled channels) are
      # placed at 1/3 and 2/3 of the network depth.
      if i in [layers//3, 2*layers//3]:
        C_curr *= 2
        reduction = True
      else:
        reduction = False
      cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
      reduction_prev = reduction
      self.cells += [cell]
      # Each cell outputs multiplier*C_curr channels (concat of nodes).
      C_prev_prev, C_prev = C_prev, multiplier*C_curr
    self.global_pooling = nn.AdaptiveAvgPool2d(1)
    self.classifier = nn.Linear(C_prev, num_classes)
    self._initialize_alphas()

  def new(self):
    """Return a fresh Network with the same configuration, copying only
    the architecture parameters (weights are re-initialised)."""
    model_new = Network(self._C, self._num_classes, self._layers, self._criterion).cuda()
    for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
      x.data.copy_(y.data)
    return model_new

  def forward(self, input):
    """Forward pass returning class logits.

    All normal cells share alphas_normal and all reduction cells share
    alphas_reduce; each is softmaxed per edge before being handed to the
    cell.
    """
    s0 = s1 = self.stem(input)
    for i, cell in enumerate(self.cells):
      if cell.reduction:
        weights = F.softmax(self.alphas_reduce, dim=-1)
      else:
        weights = F.softmax(self.alphas_normal, dim=-1)
      s0, s1 = s1, cell(s0, s1, weights)
    out = self.global_pooling(s1)
    logits = self.classifier(out.view(out.size(0),-1))
    return logits

  def _loss(self, input, target):
    """Criterion applied to a forward pass (used by the architect step)."""
    logits = self(input)
    return self._criterion(logits, target)

  def _initialize_alphas(self):
    """Create the architecture parameters: one (num_edges x num_ops)
    matrix for normal cells and one for reduction cells, initialised with
    small Gaussian noise on the GPU."""
    # Number of edges per cell: sum over nodes i of (2 + i).
    k = sum(1 for i in range(self._steps) for n in range(2+i))
    num_ops = len(PRIMITIVES)
    self.alphas_normal = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
    self.alphas_reduce = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
    self._arch_parameters = [
      self.alphas_normal,
      self.alphas_reduce,
    ]

  def arch_parameters(self):
    """Return the architecture (alpha) parameters, optimised separately
    from the network weights."""
    return self._arch_parameters

  def genotype(self):
    """Discretise the current alphas into a Genotype: for every node keep
    the two strongest incoming edges and the best non-'none' operation on
    each."""
    def _parse(weights):
      # `weights` is the softmaxed (num_edges x num_ops) matrix as a
      # numpy array, in the same flat edge order Cell uses.
      gene = []
      n = 2
      start = 0
      for i in range(self._steps):
        end = start + n
        W = weights[start:end].copy()
        # Rank node i's 2+i incoming edges by their best non-'none' op
        # weight and keep the top two.
        edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[:2]
        for j in edges:
          # Pick the strongest operation on this edge, excluding 'none'.
          k_best = None
          for k in range(len(W[j])):
            if k != PRIMITIVES.index('none'):
              if k_best is None or W[j][k] > W[j][k_best]:
                k_best = k
          gene.append((PRIMITIVES[k_best], j))
        start = end
        n += 1
      return gene
    gene_normal = _parse(F.softmax(self.alphas_normal, dim=-1).data.cpu().numpy())
    gene_reduce = _parse(F.softmax(self.alphas_reduce, dim=-1).data.cpu().numpy())
    # The discrete cell concatenates the same trailing nodes the search
    # cell did.
    concat = range(2+self._steps-self._multiplier, self._steps+2)
    genotype = Genotype(
      normal=gene_normal, normal_concat=concat,
      reduce=gene_reduce, reduce_concat=concat
    )
    return genotype
| 30.542683 | 128 | 0.643242 | import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import *
from torch.autograd import Variable
from genotypes import PRIMITIVES
from genotypes import Genotype
class MixedOp(nn.Module):
def __init__(self, C, stride):
super(MixedOp, self).__init__()
self._ops = nn.ModuleList()
for primitive in PRIMITIVES:
op = OPS[primitive](C, stride, False)
if 'pool' in primitive:
op = nn.Sequential(op, nn.BatchNorm2d(C, affine=False))
self._ops.append(op)
def forward(self, x, weights):
return sum(w * op(x) for w, op in zip(weights, self._ops))
class Cell(nn.Module):
def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):
super(Cell, self).__init__()
self.reduction = reduction
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
self._steps = steps
self._multiplier = multiplier
self._ops = nn.ModuleList()
self._bns = nn.ModuleList()
for i in range(self._steps):
for j in range(2+i):
stride = 2 if reduction and j < 2 else 1
op = MixedOp(C, stride)
self._ops.append(op)
def forward(self, s0, s1, weights):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
offset = 0
for i in range(self._steps):
s = sum(self._ops[offset+j](h, weights[offset+j]) for j, h in enumerate(states))
offset += len(states)
states.append(s)
return torch.cat(states[-self._multiplier:], dim=1)
class Network(nn.Module):
def __init__(self, C, num_classes, layers, criterion, steps=4, multiplier=4, stem_multiplier=3):
super(Network, self).__init__()
self._C = C
self._num_classes = num_classes
self._layers = layers
self._criterion = criterion
self._steps = steps
self._multiplier = multiplier
C_curr = stem_multiplier*C
self.stem = nn.Sequential(
nn.Conv2d(1, C_curr, 3, padding=1, bias=False),
nn.BatchNorm2d(C_curr)
)
C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
self.cells = nn.ModuleList()
reduction_prev = False
for i in range(layers):
if i in [layers//3, 2*layers//3]:
C_curr *= 2
reduction = True
else:
reduction = False
cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
reduction_prev = reduction
self.cells += [cell]
C_prev_prev, C_prev = C_prev, multiplier*C_curr
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
self._initialize_alphas()
def new(self):
model_new = Network(self._C, self._num_classes, self._layers, self._criterion).cuda()
for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
x.data.copy_(y.data)
return model_new
def forward(self, input):
s0 = s1 = self.stem(input)
for i, cell in enumerate(self.cells):
if cell.reduction:
weights = F.softmax(self.alphas_reduce, dim=-1)
else:
weights = F.softmax(self.alphas_normal, dim=-1)
s0, s1 = s1, cell(s0, s1, weights)
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0),-1))
return logits
def _loss(self, input, target):
logits = self(input)
return self._criterion(logits, target)
def _initialize_alphas(self):
k = sum(1 for i in range(self._steps) for n in range(2+i))
num_ops = len(PRIMITIVES)
self.alphas_normal = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
self.alphas_reduce = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
self._arch_parameters = [
self.alphas_normal,
self.alphas_reduce,
]
def arch_parameters(self):
return self._arch_parameters
def genotype(self):
def _parse(weights):
gene = []
n = 2
start = 0
for i in range(self._steps):
end = start + n
W = weights[start:end].copy()
edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[:2]
for j in edges:
k_best = None
for k in range(len(W[j])):
if k != PRIMITIVES.index('none'):
if k_best is None or W[j][k] > W[j][k_best]:
k_best = k
gene.append((PRIMITIVES[k_best], j))
start = end
n += 1
return gene
gene_normal = _parse(F.softmax(self.alphas_normal, dim=-1).data.cpu().numpy())
gene_reduce = _parse(F.softmax(self.alphas_reduce, dim=-1).data.cpu().numpy())
concat = range(2+self._steps-self._multiplier, self._steps+2)
genotype = Genotype(
normal=gene_normal, normal_concat=concat,
reduce=gene_reduce, reduce_concat=concat
)
return genotype
| true | true |
f71434c24f3b7959298b19af49f4893c651e600c | 2,465 | py | Python | credoscript/adaptors/variationadaptor.py | tlb-lab/credoscript | 32bdf08d84703dc2062dae4df1a95587d36c3cf7 | [
"MIT"
] | null | null | null | credoscript/adaptors/variationadaptor.py | tlb-lab/credoscript | 32bdf08d84703dc2062dae4df1a95587d36c3cf7 | [
"MIT"
] | null | null | null | credoscript/adaptors/variationadaptor.py | tlb-lab/credoscript | 32bdf08d84703dc2062dae4df1a95587d36c3cf7 | [
"MIT"
] | null | null | null | from sqlalchemy.sql.expression import and_
from credoscript.mixins.base import paginate
class VariationAdaptor(object):
    """
    Adaptor to fetch Variation entities from the database through their
    relationships (residue mappings, chains, phenotypes, binding sites).
    """
    def __init__(self, dynamic=False, paginate=False, per_page=100):
        # Base query; the @paginate decorator reads the flags below to
        # decide whether to return a dynamic query, a page, or a list.
        self.query = Variation.query
        self.dynamic = dynamic
        self.paginate = paginate
        self.per_page = per_page
    def fetch_by_variation_id(self, variation_id):
        """
        Return the Variation with the given primary key, or None.
        """
        return self.query.get(variation_id)
    def fetch_by_variation_name(self, variation_name):
        """
        Return the first Variation with the given name, or None.
        """
        return self.query.filter_by(variation_name=variation_name).first()
    @paginate
    def fetch_all_by_res_map_id(self, res_map_id, *expr, **kwargs):
        """
        Return all variations mapped onto the residue with the given
        residue-map identifier.
        """
        query = self.query.join('Variation2PDB')
        query = query.filter(Variation2PDB.res_map_id==res_map_id)
        return query
    @paginate
    def fetch_all_by_chain_id(self, chain_id, *expr, **kwargs):
        """
        Return all variations mapped onto peptides of the chain with the
        given identifier; extra filter expressions are ANDed in.
        """
        query = self.query.join('Variation2PDB')
        query = query.join(Peptide, Peptide.res_map_id==Variation2PDB.res_map_id)
        query = query.filter(and_(Peptide.chain_id==chain_id, *expr))
        return query
    @paginate
    def fetch_all_ext_by_chain_id(self, chain_id, *expr, **kwargs):
        """
        Extended variant of fetch_all_by_chain_id: each result row also
        carries the Variation2UniProt mapping and the Peptide entity.
        """
        query = self.query.join('Variation2UniProt','Variation2PDB','Peptide')
        query = query.filter(and_(Peptide.chain_id==chain_id, *expr))
        query = query.add_entity(Variation2UniProt)
        query = query.add_entity(Peptide)
        return query
    @paginate
    def fetch_all_by_phenotype_id(self, phenotype_id, *expr, **kwargs):
        """
        Return all distinct variations annotated with the phenotype having
        the given identifier.
        """
        query = self.query.join('Annotations')
        query = query.filter(and_(Annotation.phenotype_id==phenotype_id, *expr))
        query = query.distinct()
        return query
    @paginate
    def fetch_all_in_contact_with_ligand_id(self, ligand_id, *expr, **kwargs):
        """
        Returns all variations that can be mapped onto binding sites defined by
        the ligand having the input ligand identifier.
        """
        query = self.query.join('Variation2BindingSites')
        query = query.filter(and_(Variation2BindingSite.ligand_id==ligand_id,
                                  *expr))
        return query.distinct()
from ..models.variation import Variation, Annotation, Variation2UniProt, Variation2PDB, Variation2BindingSite
from ..models.peptide import Peptide
| 31.602564 | 109 | 0.643813 | from sqlalchemy.sql.expression import and_
from credoscript.mixins.base import paginate
class VariationAdaptor(object):
def __init__(self, dynamic=False, paginate=False, per_page=100):
self.query = Variation.query
self.dynamic = dynamic
self.paginate = paginate
self.per_page = per_page
def fetch_by_variation_id(self, variation_id):
return self.query.get(variation_id)
def fetch_by_variation_name(self, variation_name):
return self.query.filter_by(variation_name=variation_name).first()
@paginate
def fetch_all_by_res_map_id(self, res_map_id, *expr, **kwargs):
query = self.query.join('Variation2PDB')
query = query.filter(Variation2PDB.res_map_id==res_map_id)
return query
@paginate
def fetch_all_by_chain_id(self, chain_id, *expr, **kwargs):
query = self.query.join('Variation2PDB')
query = query.join(Peptide, Peptide.res_map_id==Variation2PDB.res_map_id)
query = query.filter(and_(Peptide.chain_id==chain_id, *expr))
return query
@paginate
def fetch_all_ext_by_chain_id(self, chain_id, *expr, **kwargs):
query = self.query.join('Variation2UniProt','Variation2PDB','Peptide')
query = query.filter(and_(Peptide.chain_id==chain_id, *expr))
query = query.add_entity(Variation2UniProt)
query = query.add_entity(Peptide)
return query
@paginate
def fetch_all_by_phenotype_id(self, phenotype_id, *expr, **kwargs):
query = self.query.join('Annotations')
query = query.filter(and_(Annotation.phenotype_id==phenotype_id, *expr))
query = query.distinct()
return query
@paginate
def fetch_all_in_contact_with_ligand_id(self, ligand_id, *expr, **kwargs):
query = self.query.join('Variation2BindingSites')
query = query.filter(and_(Variation2BindingSite.ligand_id==ligand_id,
*expr))
return query.distinct()
from ..models.variation import Variation, Annotation, Variation2UniProt, Variation2PDB, Variation2BindingSite
from ..models.peptide import Peptide
| true | true |
f71435aefbab60525e1f6180d047b1c4a343f58a | 957 | py | Python | test/test_basic_software_asset_all_of.py | cons3rt/cons3rt-python-sdk | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | [
"RSA-MD"
] | null | null | null | test/test_basic_software_asset_all_of.py | cons3rt/cons3rt-python-sdk | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | [
"RSA-MD"
] | null | null | null | test/test_basic_software_asset_all_of.py | cons3rt/cons3rt-python-sdk | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | [
"RSA-MD"
] | null | null | null | # coding: utf-8
"""
CONS3RT Web API
A CONS3RT ReSTful API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: apiteam@swagger.io
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.basic_software_asset_all_of import BasicSoftwareAssetAllOf # noqa: E501
from openapi_client.rest import ApiException
class TestBasicSoftwareAssetAllOf(unittest.TestCase):
    """Unit-test stubs for the BasicSoftwareAssetAllOf model."""

    def setUp(self):
        # No fixtures required: the test below is still a generated stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testBasicSoftwareAssetAllOf(self):
        """Stub test for constructing a BasicSoftwareAssetAllOf instance."""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.basic_software_asset_all_of.BasicSoftwareAssetAllOf()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 23.341463 | 107 | 0.726228 |
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.basic_software_asset_all_of import BasicSoftwareAssetAllOf
from openapi_client.rest import ApiException
class TestBasicSoftwareAssetAllOf(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testBasicSoftwareAssetAllOf(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
f7143656ce4da10df1aaa3d84302fc6d8f3085ff | 4,728 | py | Python | tests/integration_tests/build/test_coverage.py | Mehigh17/firecracker | 78c6b29f14f9e810c7426d935b5c4fbdfdfc4119 | [
"Apache-2.0"
] | null | null | null | tests/integration_tests/build/test_coverage.py | Mehigh17/firecracker | 78c6b29f14f9e810c7426d935b5c4fbdfdfc4119 | [
"Apache-2.0"
] | null | null | null | tests/integration_tests/build/test_coverage.py | Mehigh17/firecracker | 78c6b29f14f9e810c7426d935b5c4fbdfdfc4119 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests pertaining to line/branch test coverage for the Firecracker code base.
# TODO
- Put the coverage in `s3://spec.firecracker` and update it automatically.
target should be put in `s3://spec.firecracker` and automatically updated.
"""
import os
import platform
import re
import pytest
import framework.utils as utils
import host_tools.cargo_build as host # pylint: disable=import-error
COVERAGE_TARGET_PCT = 84.53
COVERAGE_MAX_DELTA = 0.05
CARGO_KCOV_REL_PATH = os.path.join(host.CARGO_BUILD_REL_PATH, 'kcov')
KCOV_COVERAGE_FILE = 'index.js'
"""kcov will aggregate coverage data in this file."""
KCOV_COVERED_LINES_REGEX = r'"covered_lines":"(\d+)"'
"""Regex for extracting number of total covered lines found by kcov."""
KCOV_TOTAL_LINES_REGEX = r'"total_lines" : "(\d+)"'
"""Regex for extracting number of total executable lines found by kcov."""
@pytest.mark.timeout(120)
@pytest.mark.skipif(
platform.machine() != "x86_64",
reason="no need to test it on multiple platforms"
)
def test_ensure_mod_tests():
"""Check that files containing unit tests have a 'tests' module defined."""
# List all source files containing rust #[test] attribute,
# (excluding generated files and integration test directories).
# Take the list and check each file contains 'mod tests {', output file
# name if it doesn't.
cmd = (
'/bin/bash '
'-c '
'"grep '
'--files-without-match '
'\'mod tests {\' '
'\\$(grep '
'--files-with-matches '
'--recursive '
'--exclude-dir=src/*_gen/* '
'\'\\#\\[test\\]\' ../src/*/src)" '
)
# The outer grep returns 0 even if it finds files without the match, so we
# ignore the return code.
result = utils.run_cmd(cmd, no_shell=False, ignore_return_code=True)
error_msg = (
'Tests found in files without a "tests" module:\n {}'
'To ensure code coverage is reported correctly, please check that '
'your tests are in a module named "tests".'.format(result.stdout)
)
assert not result.stdout, error_msg
@pytest.mark.timeout(400)
@pytest.mark.skipif(
    platform.machine() != "x86_64",
    reason="kcov hangs on aarch64"
)
def test_coverage(test_session_root_path, test_session_tmp_path):
    """Test line coverage with kcov.

    The result is extracted from the $KCOV_COVERAGE_FILE file created by kcov
    after a coverage run.
    """
    # Paths and files excluded from coverage measurement: toolchain code,
    # build artefacts, test code, and auto-generated sources.
    exclude_pattern = (
        '${CARGO_HOME:-$HOME/.cargo/},'
        'build/,'
        'tests/,'
        'usr/lib/gcc,'
        'lib/x86_64-linux-gnu/,'
        # The following files/directories are auto-generated
        'bootparam.rs,'
        'elf.rs,'
        'mpspec.rs,'
        'msr_index.rs,'
        '_gen'
    )
    # Unit-test modules are excluded by region marker (see
    # test_ensure_mod_tests, which enforces the marker's presence).
    exclude_region = '\'mod tests {\''
    cmd = (
        'CARGO_TARGET_DIR={} cargo kcov --all '
        '--output {} -- '
        '--exclude-pattern={} '
        '--exclude-region={} --verify'
    ).format(
        os.path.join(test_session_root_path, CARGO_KCOV_REL_PATH),
        test_session_tmp_path,
        exclude_pattern,
        exclude_region
    )
    # By default, `cargo kcov` passes `--exclude-pattern=$CARGO_HOME --verify`
    # to kcov. To pass others arguments, we need to include the defaults.
    utils.run_cmd(cmd)
    coverage_file = os.path.join(test_session_tmp_path, KCOV_COVERAGE_FILE)
    with open(coverage_file) as cov_output:
        contents = cov_output.read()
        # kcov writes the totals as JSON-ish key/value pairs; pull them out
        # with the module-level regexes.
        covered_lines = int(re.findall(KCOV_COVERED_LINES_REGEX, contents)[0])
        total_lines = int(re.findall(KCOV_TOTAL_LINES_REGEX, contents)[0])
        coverage = covered_lines / total_lines * 100
    print("Number of executable lines: {}".format(total_lines))
    print("Number of covered lines: {}".format(covered_lines))
    print("Thus, coverage is: {:.2f}%".format(coverage))
    coverage_low_msg = (
        'Current code coverage ({:.2f}%) is below the target ({}%).'
        .format(coverage, COVERAGE_TARGET_PCT)
    )
    min_coverage = COVERAGE_TARGET_PCT - COVERAGE_MAX_DELTA
    assert coverage >= min_coverage, coverage_low_msg
    # Get the name of the variable that needs updating.
    # (Looked up via globals() so the message stays correct if the constant
    # is ever renamed.)
    namespace = globals()
    cov_target_name = [name for name in namespace if namespace[name]
                       is COVERAGE_TARGET_PCT][0]
    # Coverage above target also fails: the target constant must be bumped
    # so the ratchet only moves upward.
    coverage_high_msg = (
        'Current code coverage ({:.2f}%) is above the target ({}%).\n'
        'Please update the value of {}.'
        .format(coverage, COVERAGE_TARGET_PCT, cov_target_name)
    )
    assert coverage - COVERAGE_TARGET_PCT <= COVERAGE_MAX_DELTA,\
        coverage_high_msg
| 32.833333 | 79 | 0.655245 |
import os
import platform
import re
import pytest
import framework.utils as utils
import host_tools.cargo_build as host
COVERAGE_TARGET_PCT = 84.53
COVERAGE_MAX_DELTA = 0.05
CARGO_KCOV_REL_PATH = os.path.join(host.CARGO_BUILD_REL_PATH, 'kcov')
KCOV_COVERAGE_FILE = 'index.js'
KCOV_COVERED_LINES_REGEX = r'"covered_lines":"(\d+)"'
KCOV_TOTAL_LINES_REGEX = r'"total_lines" : "(\d+)"'
@pytest.mark.timeout(120)
@pytest.mark.skipif(
platform.machine() != "x86_64",
reason="no need to test it on multiple platforms"
)
def test_ensure_mod_tests():
cmd = (
'/bin/bash '
'-c '
'"grep '
'--files-without-match '
'\'mod tests {\' '
'\\$(grep '
'--files-with-matches '
'--recursive '
'--exclude-dir=src/*_gen/* '
'\'\\#\\[test\\]\' ../src/*/src)" '
)
# The outer grep returns 0 even if it finds files without the match, so we
# ignore the return code.
result = utils.run_cmd(cmd, no_shell=False, ignore_return_code=True)
error_msg = (
'Tests found in files without a "tests" module:\n {}'
'To ensure code coverage is reported correctly, please check that '
'your tests are in a module named "tests".'.format(result.stdout)
)
assert not result.stdout, error_msg
@pytest.mark.timeout(400)
@pytest.mark.skipif(
    platform.machine() != "x86_64",
    reason="kcov hangs on aarch64"
)
def test_coverage(test_session_root_path, test_session_tmp_path):
    """Run ``cargo kcov`` and assert coverage stays inside the target band.

    Coverage must not fall more than COVERAGE_MAX_DELTA below
    COVERAGE_TARGET_PCT, nor rise more than COVERAGE_MAX_DELTA above it;
    the upper bound forces COVERAGE_TARGET_PCT to be updated whenever
    coverage improves.
    """
    # Paths excluded from the measurement (third-party, build output,
    # integration tests, system libs).
    exclude_pattern = (
        '${CARGO_HOME:-$HOME/.cargo/},'
        'build/,'
        'tests/,'
        'usr/lib/gcc,'
        'lib/x86_64-linux-gnu/,'
        # The following files/directories are auto-generated
        'bootparam.rs,'
        'elf.rs,'
        'mpspec.rs,'
        'msr_index.rs,'
        '_gen'
    )
    # Unit-test modules themselves are excluded from coverage.
    exclude_region = '\'mod tests {\''
    cmd = (
        'CARGO_TARGET_DIR={} cargo kcov --all '
        '--output {} -- '
        '--exclude-pattern={} '
        '--exclude-region={} --verify'
    ).format(
        os.path.join(test_session_root_path, CARGO_KCOV_REL_PATH),
        test_session_tmp_path,
        exclude_pattern,
        exclude_region
    )
    # By default, `cargo kcov` passes `--exclude-pattern=$CARGO_HOME --verify`
    # to kcov. To pass others arguments, we need to include the defaults.
    utils.run_cmd(cmd)
    coverage_file = os.path.join(test_session_tmp_path, KCOV_COVERAGE_FILE)
    with open(coverage_file) as cov_output:
        contents = cov_output.read()
        covered_lines = int(re.findall(KCOV_COVERED_LINES_REGEX, contents)[0])
        total_lines = int(re.findall(KCOV_TOTAL_LINES_REGEX, contents)[0])
        coverage = covered_lines / total_lines * 100
    print("Number of executable lines: {}".format(total_lines))
    print("Number of covered lines: {}".format(covered_lines))
    print("Thus, coverage is: {:.2f}%".format(coverage))
    coverage_low_msg = (
        'Current code coverage ({:.2f}%) is below the target ({}%).'
        .format(coverage, COVERAGE_TARGET_PCT)
    )
    min_coverage = COVERAGE_TARGET_PCT - COVERAGE_MAX_DELTA
    assert coverage >= min_coverage, coverage_low_msg
    # Get the name of the variable that needs updating.
    # NOTE(review): this relies on `is` identity against a module-level float;
    # it works because globals() yields the same object the constant is bound
    # to, but it is fragile under refactoring -- confirm before changing.
    namespace = globals()
    cov_target_name = [name for name in namespace if namespace[name]
                       is COVERAGE_TARGET_PCT][0]
    coverage_high_msg = (
        'Current code coverage ({:.2f}%) is above the target ({}%).\n'
        'Please update the value of {}.'
        .format(coverage, COVERAGE_TARGET_PCT, cov_target_name)
    )
    assert coverage - COVERAGE_TARGET_PCT <= COVERAGE_MAX_DELTA,\
        coverage_high_msg
| true | true |
f71436d10cc2c701fbdd2731e650a7b4d07afd22 | 6,393 | py | Python | bindings/python/ensmallen_graph/datasets/networkrepository/cfat5005.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/networkrepository/cfat5005.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/networkrepository/cfat5005.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph c-fat500-5.
The graph is automatically retrieved from the NetworkRepository repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:54:22.066913
The undirected graph c-fat500-5 has 500 nodes and 23191 unweighted edges,
of which none are self-loops. The graph is quite dense as it has a density
of 0.18590 and is connected, as it has a single component. The graph median
node degree is 92, the mean node degree is 92.76 and the node degree mode
is 92. The top 5 most central nodes are 499 (degree 95), 498 (degree 95),
483 (degree 95), 482 (degree 95) and 467 (degree 95).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
@misc{dimacs,
author={{DIMACS}},
title={DIMACS Challenge},
note={http://dimacs.rutgers.edu/Challenges/}}
@article{rossi2014coloring,
title={Coloring Large Complex Networks},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle={Social Network Analysis and Mining},
pages={1--51},
year={2014}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import CFat5005
# Then load the graph
graph = CFat5005()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def CFat5005(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/networkrepository",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return a new instance of the c-fat500-5 graph.

    The graph is automatically retrieved from the NetworkRepository
    repository (http://networkrepository.com) and cached locally.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of the c-fat500-5 graph.

    References
    ---------------------
    Please cite the NetworkRepository paper (Rossi & Ahmed, AAAI 2015),
    the DIMACS Challenge (http://dimacs.rutgers.edu/Challenges/) and
    "Coloring Large Complex Networks" (Rossi & Ahmed, 2014) if you use
    this data.
    """
    # Build the retriever first, then invoke it to materialize the graph.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="CFat5005",
        dataset="networkrepository",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 31.185366 | 94 | 0.672141 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph
def CFat5005(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/networkrepository",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return a new instance of the c-fat500-5 graph.

    The graph is automatically retrieved from the NetworkRepository
    repository and cached under ``cache_path``.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
    verbose: int = 2,
        Whether to show loading bars during retrieval and building.
    cache_path: str = "graphs/networkrepository",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of the c-fat500-5 graph.
    """
    return AutomaticallyRetrievedGraph(
        graph_name="CFat5005",
        dataset="networkrepository",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
| true | true |
f714374a1632476acaefbc832c81cdaf88352611 | 337 | py | Python | app.py | munrojm/api | 478eb7b7d65ee72c65c9c3a61aec02aed7aa5ffe | [
"BSD-3-Clause-LBNL"
] | null | null | null | app.py | munrojm/api | 478eb7b7d65ee72c65c9c3a61aec02aed7aa5ffe | [
"BSD-3-Clause-LBNL"
] | null | null | null | app.py | munrojm/api | 478eb7b7d65ee72c65c9c3a61aec02aed7aa5ffe | [
"BSD-3-Clause-LBNL"
] | null | null | null | import os
from monty.serialization import loadfn
from fastapi import FastAPI
import mp_api.xas.api
xas_store = os.environ.get("XAS_STORE", "xas_store.json")
xas_store = loadfn(xas_store)
xas_router = mp_api.xas.api.get_router(xas_store)
app = FastAPI(title="Materials Project API", version="3.0.0-dev")
app.include_router(xas_router)
| 25.923077 | 65 | 0.789318 | import os
from monty.serialization import loadfn
from fastapi import FastAPI
import mp_api.xas.api
# Path to the serialized XAS store; overridable via the XAS_STORE env var.
xas_store = os.environ.get("XAS_STORE", "xas_store.json")
# Deserialize the store definition from the JSON file.
xas_store = loadfn(xas_store)
# Mount the XAS endpoints onto the FastAPI application.
xas_router = mp_api.xas.api.get_router(xas_store)
app = FastAPI(title="Materials Project API", version="3.0.0-dev")
app.include_router(xas_router)
| true | true |
f714379ee3973b8021d36894d60ed8cb48ed5454 | 246 | py | Python | exercicios/Lista4/Q14.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | exercicios/Lista4/Q14.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | exercicios/Lista4/Q14.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | #Faça um programa que leia um vetor de 10 posições e verifique
#se existem valores iguais e os escreva na tela.
vetor=[]
for c in range(0,10):
n=int(input("Informe um numero: "))
if n in vetor:
print(f"{n}")
vetor.append(n)
| 24.6 | 62 | 0.650407 |
# Read 10 numbers and print each value that repeats an earlier entry.
vetor=[]
for c in range(0,10):
    n=int(input("Informe um numero: "))
    # If n was already entered, it is a duplicate: print it.
    if n in vetor:
        print(f"{n}")
    vetor.append(n)
| true | true |
f71438eae2367cd2d781df2131122da34442181b | 27,609 | py | Python | nova/tests/unit/virt/test_block_device.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/virt/test_block_device.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/virt/test_block_device.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | null | null | null | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo.serialization import jsonutils
from nova import block_device
from nova import context
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
from nova.volume import encryptors
class TestDriverBlockDevice(test.NoDBTestCase):
    """Tests for the driver block device wrappers in nova.virt.block_device."""
    # Maps a BDM source_type to the driver wrapper class under test.
    driver_classes = {
        'swap': driver_block_device.DriverSwapBlockDevice,
        'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
        'volume': driver_block_device.DriverVolumeBlockDevice,
        'snapshot': driver_block_device.DriverSnapshotBlockDevice,
        'image': driver_block_device.DriverImageBlockDevice,
        'blank': driver_block_device.DriverBlankBlockDevice
    }
    # For each device kind below: the DB-style BDM dict, its expected
    # driver-format translation, and its expected legacy-format translation.
    swap_bdm = block_device.BlockDeviceDict(
        {'id': 1, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sdb1',
         'source_type': 'blank',
         'destination_type': 'local',
         'delete_on_termination': True,
         'guest_format': 'swap',
         'disk_bus': 'scsi',
         'volume_size': 2,
         'boot_index': -1})
    swap_driver_bdm = {
        'device_name': '/dev/sdb1',
        'swap_size': 2,
        'disk_bus': 'scsi'}
    swap_legacy_driver_bdm = {
        'device_name': '/dev/sdb1',
        'swap_size': 2}
    ephemeral_bdm = block_device.BlockDeviceDict(
        {'id': 2, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sdc1',
         'source_type': 'blank',
         'destination_type': 'local',
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'volume_size': 4,
         'guest_format': 'ext4',
         'delete_on_termination': True,
         'boot_index': -1})
    ephemeral_driver_bdm = {
        'device_name': '/dev/sdc1',
        'size': 4,
        'device_type': 'disk',
        'guest_format': 'ext4',
        'disk_bus': 'scsi'}
    ephemeral_legacy_driver_bdm = {
        'device_name': '/dev/sdc1',
        'size': 4,
        'virtual_name': 'ephemeral0',
        'num': 0}
    volume_bdm = block_device.BlockDeviceDict(
        {'id': 3, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda1',
         'source_type': 'volume',
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'volume_size': 8,
         'destination_type': 'volume',
         'volume_id': 'fake-volume-id-1',
         'guest_format': 'ext4',
         'connection_info': '{"fake": "connection_info"}',
         'delete_on_termination': False,
         'boot_index': 0})
    volume_driver_bdm = {
        'mount_device': '/dev/sda1',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': False,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': 'ext4',
        'boot_index': 0}
    volume_legacy_driver_bdm = {
        'mount_device': '/dev/sda1',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': False}
    snapshot_bdm = block_device.BlockDeviceDict(
        {'id': 4, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 3,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'snapshot',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'snapshot_id': 'fake-snapshot-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})
    snapshot_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}
    snapshot_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}
    image_bdm = block_device.BlockDeviceDict(
        {'id': 5, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 1,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'image',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'image_id': 'fake-image-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})
    image_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}
    image_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}
    blank_bdm = block_device.BlockDeviceDict(
        {'id': 6, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 3,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'blank',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'snapshot_id': 'fake-snapshot-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})
    blank_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}
    blank_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}
    def setUp(self):
        """Create mox mocks for the volume API and virt driver."""
        super(TestDriverBlockDevice, self).setUp()
        self.volume_api = self.mox.CreateMock(cinder.API)
        self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
        self.context = context.RequestContext('fake_user',
                                              'fake_project')
    def test_no_device_raises(self):
        # A BDM flagged as 'no_device' must not be transformable by any
        # of the driver wrapper classes.
        for name, cls in self.driver_classes.items():
            self.assertRaises(driver_block_device._NotTransformable,
                              cls, {'no_device': True})
    def _test_driver_device(self, name):
        """Exercise the wrapper class for *name* against its fixtures.

        Checks the driver/legacy translations, attribute passthrough,
        rejection of mismatched BDM types, and the save() behaviour.
        """
        db_bdm = getattr(self, "%s_bdm" % name)
        test_bdm = self.driver_classes[name](db_bdm)
        self.assertThat(test_bdm, matchers.DictMatches(
            getattr(self, "%s_driver_bdm" % name)))
        for k, v in db_bdm.iteritems():
            field_val = getattr(test_bdm._bdm_obj, k)
            if isinstance(field_val, bool):
                v = bool(v)
            self.assertEqual(field_val, v)
        self.assertThat(test_bdm.legacy(),
                        matchers.DictMatches(
                            getattr(self, "%s_legacy_driver_bdm" % name)))
        # Test passthru attributes
        for passthru in test_bdm._proxy_as_attr:
            self.assertEqual(getattr(test_bdm, passthru),
                             getattr(test_bdm._bdm_obj, passthru))
        # Make sure that all others raise _invalidType
        for other_name, cls in self.driver_classes.iteritems():
            if other_name == name:
                continue
            self.assertRaises(driver_block_device._InvalidType,
                              cls,
                              getattr(self, '%s_bdm' % name))
        # Test the save method
        with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
            test_bdm.save(self.context)
            for fld, alias in test_bdm._update_on_save.iteritems():
                self.assertEqual(test_bdm[alias or fld],
                                 getattr(test_bdm._bdm_obj, fld))
            save_mock.assert_called_once_with(self.context)
        # Test the save method with no context passed
        with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
            test_bdm.save()
            save_mock.assert_called_once_with()
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
    # Per-device-kind wrappers around the generic _test_driver_device /
    # _test_driver_default_size helpers, plus kind-specific assertions.
    def test_driver_swap_block_device(self):
        self._test_driver_device("swap")
    def test_driver_swap_default_size(self):
        self._test_driver_default_size('swap')
    def test_driver_ephemeral_block_device(self):
        self._test_driver_device("ephemeral")
    def test_driver_ephemeral_default_size(self):
        self._test_driver_default_size('ephemeral')
    def test_driver_volume_block_device(self):
        self._test_driver_device("volume")
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        # connection_info is stored as JSON on the object and decoded by
        # the wrapper.
        self.assertEqual(test_bdm['connection_info'],
                         jsonutils.loads(test_bdm._bdm_obj.connection_info))
        self.assertEqual(test_bdm._bdm_obj.id, 3)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
        self.assertEqual(test_bdm.volume_size, 8)
    def test_driver_snapshot_block_device(self):
        self._test_driver_device("snapshot")
        test_bdm = self.driver_classes['snapshot'](
            self.snapshot_bdm)
        self.assertEqual(test_bdm._bdm_obj.id, 4)
        self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
        self.assertEqual(test_bdm.volume_size, 3)
    def test_driver_image_block_device(self):
        self._test_driver_device('image')
        test_bdm = self.driver_classes['image'](
            self.image_bdm)
        self.assertEqual(test_bdm._bdm_obj.id, 5)
        self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
        self.assertEqual(test_bdm.volume_size, 1)
    def test_driver_image_block_device_destination_local(self):
        # Image BDMs with a 'local' destination are not supported by the
        # image wrapper class.
        self._test_driver_device('image')
        bdm = self.image_bdm.copy()
        bdm['destination_type'] = 'local'
        self.assertRaises(driver_block_device._InvalidType,
                          self.driver_classes['image'], bdm)
    def test_driver_blank_block_device(self):
        self._test_driver_device('blank')
        test_bdm = self.driver_classes['blank'](
            self.blank_bdm)
        self.assertEqual(6, test_bdm._bdm_obj.id)
        self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
        self.assertEqual(3, test_bdm.volume_size)
    def _test_volume_attach(self, driver_bdm, bdm_dict,
                            fake_volume, check_attach=True,
                            fail_check_attach=False, driver_attach=False,
                            fail_driver_attach=False, volume_attach=True,
                            access_mode='rw'):
        """Record the mox expectations for a volume attach scenario.

        Depending on the flags, records the failure path (check_attach or
        driver attach raising) and returns early, or records the full
        happy path ending with volume_api.attach and a BDM save.
        Returns the fake instance and the expected connection_info.
        NOTE: expectations are strictly ordered; callers must ReplayAll().
        """
        elevated_context = self.context.elevated()
        self.stubs.Set(self.context, 'elevated',
                       lambda: elevated_context)
        self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
        self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        connector = {'ip': 'fake_ip', 'host': 'fake_host'}
        connection_info = {'data': {'access_mode': access_mode}}
        expected_conn_info = {'data': {'access_mode': access_mode},
                              'serial': fake_volume['id']}
        enc_data = {'fake': 'enc_data'}
        self.volume_api.get(self.context,
                            fake_volume['id']).AndReturn(fake_volume)
        if check_attach:
            if not fail_check_attach:
                self.volume_api.check_attach(self.context, fake_volume,
                                    instance=instance).AndReturn(None)
            else:
                self.volume_api.check_attach(self.context, fake_volume,
                                    instance=instance).AndRaise(
                                        test.TestingException)
                # Failure here short-circuits the rest of the attach flow.
                return instance, expected_conn_info
        self.virt_driver.get_volume_connector(instance).AndReturn(connector)
        self.volume_api.initialize_connection(
            elevated_context, fake_volume['id'],
            connector).AndReturn(connection_info)
        if driver_attach:
            encryptors.get_encryption_metadata(
                elevated_context, self.volume_api, fake_volume['id'],
                connection_info).AndReturn(enc_data)
            if not fail_driver_attach:
                self.virt_driver.attach_volume(
                        elevated_context, expected_conn_info, instance,
                        bdm_dict['device_name'],
                        disk_bus=bdm_dict['disk_bus'],
                        device_type=bdm_dict['device_type'],
                        encryption=enc_data).AndReturn(None)
            else:
                self.virt_driver.attach_volume(
                        elevated_context, expected_conn_info, instance,
                        bdm_dict['device_name'],
                        disk_bus=bdm_dict['disk_bus'],
                        device_type=bdm_dict['device_type'],
                        encryption=enc_data).AndRaise(test.TestingException)
                # A failed driver attach must tear the connection down.
                self.volume_api.terminate_connection(
                        elevated_context, fake_volume['id'],
                        expected_conn_info).AndReturn(None)
                return instance, expected_conn_info
        if volume_attach:
            self.volume_api.attach(elevated_context, fake_volume['id'],
                                   'fake_uuid', bdm_dict['device_name'],
                                   mode=access_mode).AndReturn(None)
        driver_bdm._bdm_obj.save(self.context).AndReturn(None)
        return instance, expected_conn_info
    def test_volume_attach(self):
        """Happy-path attach of a volume BDM in read-write mode."""
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
    def test_volume_attach_ro(self):
        """Attach of a volume BDM with read-only access mode."""
        test_bdm = self.driver_classes['volume'](self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, access_mode='ro')
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
def check_volume_attach_check_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.asserRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver)
    def test_volume_no_volume_attach(self):
        """Attach with both check_attach and driver attach skipped."""
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, check_attach=False,
            driver_attach=False)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver,
                        do_check_attach=False, do_driver_attach=False)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
    def test_volume_attach_no_check_driver_attach(self):
        """Attach with check_attach skipped but driver attach performed."""
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, check_attach=False,
            driver_attach=True)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver,
                        do_check_attach=False, do_driver_attach=True)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
def check_volume_attach_driver_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.asserRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=True)
    def test_refresh_connection(self):
        """refresh_connection_info() re-initializes and saves the BDM."""
        test_bdm = self.driver_classes['snapshot'](
            self.snapshot_bdm)
        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        connector = {'ip': 'fake_ip', 'host': 'fake_host'}
        connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
        expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
                              'serial': 'fake-volume-id-2'}
        self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
        self.virt_driver.get_volume_connector(instance).AndReturn(connector)
        self.volume_api.initialize_connection(
            self.context, test_bdm.volume_id,
            connector).AndReturn(connection_info)
        test_bdm._bdm_obj.save(self.context).AndReturn(None)
        self.mox.ReplayAll()
        test_bdm.refresh_connection_info(self.context, instance,
                                         self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
    def test_snapshot_attach_no_volume(self):
        """A snapshot BDM without a volume creates one from the snapshot."""
        no_volume_snapshot = self.snapshot_bdm.copy()
        no_volume_snapshot['volume_id'] = None
        test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
        snapshot = {'id': 'fake-volume-id-1',
                    'attach_status': 'detached'}
        volume = {'id': 'fake-volume-id-2',
                  'attach_status': 'detached'}
        wait_func = self.mox.CreateMockAnything()
        self.volume_api.get_snapshot(self.context,
                                     'fake-snapshot-id-1').AndReturn(snapshot)
        self.volume_api.create(self.context, 3,
                               '', '', snapshot).AndReturn(volume)
        wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, no_volume_snapshot, volume)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver, wait_func)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
    def test_snapshot_attach_volume(self):
        """A snapshot BDM that already has a volume attaches it directly."""
        test_bdm = self.driver_classes['snapshot'](
            self.snapshot_bdm)
        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        volume_class = self.driver_classes['volume']
        self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure theses are not called
        self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
        self.mox.StubOutWithMock(self.volume_api, 'create')
        volume_class.attach(self.context, instance, self.volume_api,
                            self.virt_driver, do_check_attach=True
                            ).AndReturn(None)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
    def test_image_attach_no_volume(self):
        """An image BDM without a volume creates one from the image."""
        no_volume_image = self.image_bdm.copy()
        no_volume_image['volume_id'] = None
        test_bdm = self.driver_classes['image'](no_volume_image)
        image = {'id': 'fake-image-id-1'}
        volume = {'id': 'fake-volume-id-2',
                  'attach_status': 'detached'}
        wait_func = self.mox.CreateMockAnything()
        self.volume_api.create(self.context, 1,
                               '', '', image_id=image['id']).AndReturn(volume)
        wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, no_volume_image, volume)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver, wait_func)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
    def test_image_attach_volume(self):
        """An image BDM that already has a volume attaches it directly."""
        test_bdm = self.driver_classes['image'](
            self.image_bdm)
        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        volume_class = self.driver_classes['volume']
        self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure theses are not called
        self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
        self.mox.StubOutWithMock(self.volume_api, 'create')
        volume_class.attach(self.context, instance, self.volume_api,
                            self.virt_driver, do_check_attach=True
                            ).AndReturn(None)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
    def test_blank_attach_volume(self):
        """A blank BDM without a volume creates a new named blank volume."""
        no_blank_volume = self.blank_bdm.copy()
        no_blank_volume['volume_id'] = None
        test_bdm = self.driver_classes['blank'](no_blank_volume)
        instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
                                                   **{'uuid': 'fake-uuid'})
        volume_class = self.driver_classes['volume']
        volume = {'id': 'fake-volume-id-2',
                  'display_name': 'fake-uuid-blank-vol'}
        with contextlib.nested(
            mock.patch.object(self.volume_api, 'create', return_value=volume),
            mock.patch.object(volume_class, 'attach')
        ) as (vol_create, vol_attach):
            test_bdm.attach(self.context, instance, self.volume_api,
                            self.virt_driver)
            # The new volume is named "<instance uuid>-blank-vol".
            vol_create.assert_called_once_with(self.context,
                                               test_bdm.volume_size,
                                               'fake-uuid-blank-vol',
                                               '')
            vol_attach.assert_called_once_with(self.context, instance,
                                               self.volume_api,
                                               self.virt_driver,
                                               do_check_attach=True)
            self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
    def test_convert_block_devices(self):
        # Only BDMs matching the target class survive the conversion.
        converted = driver_block_device._convert_block_devices(
            self.driver_classes['volume'],
            [self.volume_bdm, self.ephemeral_bdm])
        self.assertEqual(converted, [self.volume_driver_bdm])
    def test_legacy_block_devices(self):
        """legacy_block_devices() translates and numbers ephemerals."""
        test_snapshot = self.driver_classes['snapshot'](
            self.snapshot_bdm)
        block_device_mapping = [test_snapshot, test_snapshot]
        legacy_bdm = driver_block_device.legacy_block_devices(
            block_device_mapping)
        self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
                                       self.snapshot_legacy_driver_bdm])
        # Test that the ephemerals work as expected
        test_ephemerals = [self.driver_classes['ephemeral'](
            self.ephemeral_bdm) for _ in xrange(2)]
        expected = [self.ephemeral_legacy_driver_bdm.copy()
                    for _ in xrange(2)]
        expected[0]['virtual_name'] = 'ephemeral0'
        expected[0]['num'] = 0
        expected[1]['virtual_name'] = 'ephemeral1'
        expected[1]['num'] = 1
        legacy_ephemerals = driver_block_device.legacy_block_devices(
            test_ephemerals)
        self.assertEqual(expected, legacy_ephemerals)
    def test_get_swap(self):
        """get_swap() handles driver, legacy, missing and empty inputs."""
        swap = [self.swap_driver_bdm]
        legacy_swap = [self.swap_legacy_driver_bdm]
        no_swap = [self.volume_driver_bdm]
        self.assertEqual(swap[0], driver_block_device.get_swap(swap))
        self.assertEqual(legacy_swap[0],
                         driver_block_device.get_swap(legacy_swap))
        self.assertIsNone(driver_block_device.get_swap(no_swap))
        self.assertIsNone(driver_block_device.get_swap([]))
    def test_is_implemented(self):
        # All standard BDMs are implemented except image->local.
        for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
                    self.ephemeral_bdm, self.snapshot_bdm):
            self.assertTrue(driver_block_device.is_implemented(bdm))
        local_image = self.image_bdm.copy()
        local_image['destination_type'] = 'local'
        self.assertFalse(driver_block_device.is_implemented(local_image))
    def test_is_block_device_mapping(self):
        # Volume-backed kinds are block device mappings; swap/ephemeral
        # (local) kinds are not.
        test_swap = self.driver_classes['swap'](self.swap_bdm)
        test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
        test_image = self.driver_classes['image'](self.image_bdm)
        test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
        test_volume = self.driver_classes['volume'](self.volume_bdm)
        test_blank = self.driver_classes['blank'](self.blank_bdm)
        for bdm in (test_image, test_snapshot, test_volume, test_blank):
            self.assertTrue(driver_block_device.is_block_device_mapping(
                bdm._bdm_obj))
        for bdm in (test_swap, test_ephemeral):
            self.assertFalse(driver_block_device.is_block_device_mapping(
                bdm._bdm_obj))
| 40.305109 | 78 | 0.604477 |
import contextlib
import mock
from oslo.serialization import jsonutils
from nova import block_device
from nova import context
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
from nova.volume import encryptors
class TestDriverBlockDevice(test.NoDBTestCase):
    """Tests for the driver block-device wrappers in nova.virt.block_device."""
    # Short name -> DriverBlockDevice subclass under test.
    driver_classes = {
        'swap': driver_block_device.DriverSwapBlockDevice,
        'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
        'volume': driver_block_device.DriverVolumeBlockDevice,
        'snapshot': driver_block_device.DriverSnapshotBlockDevice,
        'image': driver_block_device.DriverImageBlockDevice,
        'blank': driver_block_device.DriverBlankBlockDevice
    }
    # For each source type the fixtures below come in threes: the DB-style
    # BlockDeviceDict input, the expected driver-format dict, and the
    # expected legacy-format dict derived from it.
    swap_bdm = block_device.BlockDeviceDict(
        {'id': 1, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sdb1',
         'source_type': 'blank',
         'destination_type': 'local',
         'delete_on_termination': True,
         'guest_format': 'swap',
         'disk_bus': 'scsi',
         'volume_size': 2,
         'boot_index': -1})
    swap_driver_bdm = {
        'device_name': '/dev/sdb1',
        'swap_size': 2,
        'disk_bus': 'scsi'}
    swap_legacy_driver_bdm = {
        'device_name': '/dev/sdb1',
        'swap_size': 2}
    ephemeral_bdm = block_device.BlockDeviceDict(
        {'id': 2, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sdc1',
         'source_type': 'blank',
         'destination_type': 'local',
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'volume_size': 4,
         'guest_format': 'ext4',
         'delete_on_termination': True,
         'boot_index': -1})
    ephemeral_driver_bdm = {
        'device_name': '/dev/sdc1',
        'size': 4,
        'device_type': 'disk',
        'guest_format': 'ext4',
        'disk_bus': 'scsi'}
    ephemeral_legacy_driver_bdm = {
        'device_name': '/dev/sdc1',
        'size': 4,
        'virtual_name': 'ephemeral0',
        'num': 0}
    volume_bdm = block_device.BlockDeviceDict(
        {'id': 3, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda1',
         'source_type': 'volume',
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'volume_size': 8,
         'destination_type': 'volume',
         'volume_id': 'fake-volume-id-1',
         'guest_format': 'ext4',
         'connection_info': '{"fake": "connection_info"}',
         'delete_on_termination': False,
         'boot_index': 0})
    volume_driver_bdm = {
        'mount_device': '/dev/sda1',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': False,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': 'ext4',
        'boot_index': 0}
    volume_legacy_driver_bdm = {
        'mount_device': '/dev/sda1',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': False}
    snapshot_bdm = block_device.BlockDeviceDict(
        {'id': 4, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 3,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'snapshot',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'snapshot_id': 'fake-snapshot-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})
    snapshot_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}
    snapshot_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}
    image_bdm = block_device.BlockDeviceDict(
        {'id': 5, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 1,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'image',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'image_id': 'fake-image-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})
    image_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}
    image_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}
    blank_bdm = block_device.BlockDeviceDict(
        {'id': 6, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 3,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'blank',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'snapshot_id': 'fake-snapshot-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})
    blank_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}
    blank_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}
    def setUp(self):
        """Create mocked volume/virt drivers and a fake request context."""
        super(TestDriverBlockDevice, self).setUp()
        self.volume_api = self.mox.CreateMock(cinder.API)
        self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
        self.context = context.RequestContext('fake_user',
                                              'fake_project')
    def test_no_device_raises(self):
        """Every driver class rejects a BDM flagged as no_device."""
        for name, cls in self.driver_classes.items():
            self.assertRaises(driver_block_device._NotTransformable,
                              cls, {'no_device': True})
    def _test_driver_device(self, name):
        """Exercise the driver BDM class registered under *name*.

        Verifies the driver-dict and legacy-dict transforms, boolean field
        coercion, attribute proxying to the underlying BDM object,
        rejection by every other driver class, and save() with and
        without an explicit context.
        """
        db_bdm = getattr(self, "%s_bdm" % name)
        test_bdm = self.driver_classes[name](db_bdm)
        self.assertThat(test_bdm, matchers.DictMatches(
            getattr(self, "%s_driver_bdm" % name)))
        for k, v in db_bdm.iteritems():
            field_val = getattr(test_bdm._bdm_obj, k)
            if isinstance(field_val, bool):
                v = bool(v)
            self.assertEqual(field_val, v)
        self.assertThat(test_bdm.legacy(),
                        matchers.DictMatches(
                            getattr(self, "%s_legacy_driver_bdm" % name)))
        # Proxied attributes must read through to the wrapped BDM object.
        for passthru in test_bdm._proxy_as_attr:
            self.assertEqual(getattr(test_bdm, passthru),
                             getattr(test_bdm._bdm_obj, passthru))
        # Every other driver class must refuse to wrap this BDM.
        for other_name, cls in self.driver_classes.iteritems():
            if other_name == name:
                continue
            self.assertRaises(driver_block_device._InvalidType,
                              cls,
                              getattr(self, '%s_bdm' % name))
        # save() with a context: fields sync to the BDM object, then save.
        with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
            test_bdm.save(self.context)
            for fld, alias in test_bdm._update_on_save.iteritems():
                self.assertEqual(test_bdm[alias or fld],
                                 getattr(test_bdm._bdm_obj, fld))
            save_mock.assert_called_once_with(self.context)
        # save() without a context is forwarded without arguments.
        with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
            test_bdm.save()
            save_mock.assert_called_once_with()
    def _test_driver_default_size(self, name):
        """A None or missing volume_size becomes a size of 0."""
        size = 'swap_size' if name == 'swap' else 'size'
        no_size_bdm = getattr(self, "%s_bdm" % name).copy()
        no_size_bdm['volume_size'] = None
        driver_bdm = self.driver_classes[name](no_size_bdm)
        self.assertEqual(driver_bdm[size], 0)
        del no_size_bdm['volume_size']
        driver_bdm = self.driver_classes[name](no_size_bdm)
        self.assertEqual(driver_bdm[size], 0)
    def test_driver_swap_block_device(self):
        """Round-trip the swap fixture through the swap driver class."""
        self._test_driver_device("swap")
    def test_driver_swap_default_size(self):
        """Swap BDMs default to swap_size 0 when volume_size is unset."""
        self._test_driver_default_size('swap')
    def test_driver_ephemeral_block_device(self):
        """Round-trip the ephemeral fixture through its driver class."""
        self._test_driver_device("ephemeral")
    def test_driver_ephemeral_default_size(self):
        """Ephemeral BDMs default to size 0 when volume_size is unset."""
        self._test_driver_default_size('ephemeral')
    def test_driver_volume_block_device(self):
        """Volume driver BDM parses connection_info and exposes volume ids."""
        self._test_driver_device("volume")
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        # connection_info is stored as JSON and deserialized on access.
        self.assertEqual(test_bdm['connection_info'],
                         jsonutils.loads(test_bdm._bdm_obj.connection_info))
        self.assertEqual(test_bdm._bdm_obj.id, 3)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
        self.assertEqual(test_bdm.volume_size, 8)
    def test_driver_snapshot_block_device(self):
        """Snapshot driver BDM exposes snapshot/volume ids and size."""
        self._test_driver_device("snapshot")
        test_bdm = self.driver_classes['snapshot'](
            self.snapshot_bdm)
        self.assertEqual(test_bdm._bdm_obj.id, 4)
        self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
        self.assertEqual(test_bdm.volume_size, 3)
    def test_driver_image_block_device(self):
        """Image driver BDM exposes the image id and volume size."""
        self._test_driver_device('image')
        test_bdm = self.driver_classes['image'](
            self.image_bdm)
        self.assertEqual(test_bdm._bdm_obj.id, 5)
        self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
        self.assertEqual(test_bdm.volume_size, 1)
    def test_driver_image_block_device_destination_local(self):
        """An image BDM with a local destination is rejected as invalid."""
        self._test_driver_device('image')
        bdm = self.image_bdm.copy()
        bdm['destination_type'] = 'local'
        self.assertRaises(driver_block_device._InvalidType,
                          self.driver_classes['image'], bdm)
    def test_driver_blank_block_device(self):
        """Blank driver BDM exposes the backing volume id and size."""
        self._test_driver_device('blank')
        test_bdm = self.driver_classes['blank'](
            self.blank_bdm)
        self.assertEqual(6, test_bdm._bdm_obj.id)
        self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
        self.assertEqual(3, test_bdm.volume_size)
    def _test_volume_attach(self, driver_bdm, bdm_dict,
                            fake_volume, check_attach=True,
                            fail_check_attach=False, driver_attach=False,
                            fail_driver_attach=False, volume_attach=True,
                            access_mode='rw'):
        """Record the mox expectations for a volume attach scenario.

        Sets up the expected sequence of volume/virt driver calls for
        attaching *fake_volume* and returns (instance, expected_conn_info).
        The boolean flags select which steps are expected and which fail;
        an early return is taken at the step configured to fail.
        """
        elevated_context = self.context.elevated()
        self.stubs.Set(self.context, 'elevated',
                       lambda: elevated_context)
        self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
        self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        connector = {'ip': 'fake_ip', 'host': 'fake_host'}
        connection_info = {'data': {'access_mode': access_mode}}
        # attach() is expected to add the volume id as the serial.
        expected_conn_info = {'data': {'access_mode': access_mode},
                              'serial': fake_volume['id']}
        enc_data = {'fake': 'enc_data'}
        self.volume_api.get(self.context,
                            fake_volume['id']).AndReturn(fake_volume)
        if check_attach:
            if not fail_check_attach:
                self.volume_api.check_attach(self.context, fake_volume,
                                    instance=instance).AndReturn(None)
            else:
                # check_attach fails: nothing further should be called.
                self.volume_api.check_attach(self.context, fake_volume,
                                    instance=instance).AndRaise(
                                            test.TestingException)
                return instance, expected_conn_info
        self.virt_driver.get_volume_connector(instance).AndReturn(connector)
        self.volume_api.initialize_connection(
            elevated_context, fake_volume['id'],
            connector).AndReturn(connection_info)
        if driver_attach:
            encryptors.get_encryption_metadata(
                    elevated_context, self.volume_api, fake_volume['id'],
                    connection_info).AndReturn(enc_data)
            if not fail_driver_attach:
                self.virt_driver.attach_volume(
                        elevated_context, expected_conn_info, instance,
                        bdm_dict['device_name'],
                        disk_bus=bdm_dict['disk_bus'],
                        device_type=bdm_dict['device_type'],
                        encryption=enc_data).AndReturn(None)
            else:
                # On driver attach failure the connection is rolled back.
                self.virt_driver.attach_volume(
                        elevated_context, expected_conn_info, instance,
                        bdm_dict['device_name'],
                        disk_bus=bdm_dict['disk_bus'],
                        device_type=bdm_dict['device_type'],
                        encryption=enc_data).AndRaise(test.TestingException)
                self.volume_api.terminate_connection(
                        elevated_context, fake_volume['id'],
                        expected_conn_info).AndReturn(None)
                return instance, expected_conn_info
        if volume_attach:
            self.volume_api.attach(elevated_context, fake_volume['id'],
                                   'fake_uuid', bdm_dict['device_name'],
                                    mode=access_mode).AndReturn(None)
        driver_bdm._bdm_obj.save(self.context).AndReturn(None)
        return instance, expected_conn_info
    def test_volume_attach(self):
        """A plain volume attach stores the expected connection_info."""
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
    def test_volume_attach_ro(self):
        """Attach honours a read-only access mode from Cinder."""
        test_bdm = self.driver_classes['volume'](self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, access_mode='ro')
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
def check_volume_attach_check_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.asserRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver)
    def test_volume_no_volume_attach(self):
        """Attach works with check_attach and driver attach both disabled."""
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, check_attach=False,
            driver_attach=False)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver,
                        do_check_attach=False, do_driver_attach=False)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
    def test_volume_attach_no_check_driver_attach(self):
        """Attach with the virt driver attach enabled but no check_attach."""
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, check_attach=False,
            driver_attach=True)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver,
                        do_check_attach=False, do_driver_attach=True)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
def check_volume_attach_driver_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.asserRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=True)
    def test_refresh_connection(self):
        """refresh_connection_info re-initializes and saves the BDM."""
        test_bdm = self.driver_classes['snapshot'](
            self.snapshot_bdm)
        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        connector = {'ip': 'fake_ip', 'host': 'fake_host'}
        connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
        # The volume id is added to the refreshed info as the serial.
        expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
                              'serial': 'fake-volume-id-2'}
        self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
        self.virt_driver.get_volume_connector(instance).AndReturn(connector)
        self.volume_api.initialize_connection(
            self.context, test_bdm.volume_id,
            connector).AndReturn(connection_info)
        test_bdm._bdm_obj.save(self.context).AndReturn(None)
        self.mox.ReplayAll()
        test_bdm.refresh_connection_info(self.context, instance,
                                         self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))
    def test_snapshot_attach_no_volume(self):
        """A snapshot BDM without a volume creates one before attaching."""
        no_volume_snapshot = self.snapshot_bdm.copy()
        no_volume_snapshot['volume_id'] = None
        test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
        snapshot = {'id': 'fake-volume-id-1',
                    'attach_status': 'detached'}
        volume = {'id': 'fake-volume-id-2',
                  'attach_status': 'detached'}
        wait_func = self.mox.CreateMockAnything()
        self.volume_api.get_snapshot(self.context,
                                     'fake-snapshot-id-1').AndReturn(snapshot)
        self.volume_api.create(self.context, 3,
                               '', '', snapshot).AndReturn(volume)
        # The wait function is polled until the new volume is available.
        wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, no_volume_snapshot, volume)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver, wait_func)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
    def test_snapshot_attach_volume(self):
        """With an existing volume, attach delegates to the volume class."""
        test_bdm = self.driver_classes['snapshot'](
            self.snapshot_bdm)
        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        volume_class = self.driver_classes['volume']
        self.mox.StubOutWithMock(volume_class, 'attach')
        # get_snapshot/create are stubbed with no expectations recorded,
        # so the test fails if the snapshot path calls them.
        self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
        self.mox.StubOutWithMock(self.volume_api, 'create')
        volume_class.attach(self.context, instance, self.volume_api,
                            self.virt_driver, do_check_attach=True
                            ).AndReturn(None)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
    def test_image_attach_no_volume(self):
        """An image BDM without a volume creates one from the image."""
        no_volume_image = self.image_bdm.copy()
        no_volume_image['volume_id'] = None
        test_bdm = self.driver_classes['image'](no_volume_image)
        image = {'id': 'fake-image-id-1'}
        volume = {'id': 'fake-volume-id-2',
                  'attach_status': 'detached'}
        wait_func = self.mox.CreateMockAnything()
        self.volume_api.create(self.context, 1,
                               '', '', image_id=image['id']).AndReturn(volume)
        # The wait function is polled until the new volume is available.
        wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, no_volume_image, volume)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver, wait_func)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
    def test_image_attach_volume(self):
        """With an existing volume, image attach delegates to the volume class."""
        test_bdm = self.driver_classes['image'](
            self.image_bdm)
        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        volume_class = self.driver_classes['volume']
        self.mox.StubOutWithMock(volume_class, 'attach')
        # get_snapshot/create are stubbed with no expectations recorded,
        # so the test fails if the image path calls them.
        self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
        self.mox.StubOutWithMock(self.volume_api, 'create')
        volume_class.attach(self.context, instance, self.volume_api,
                            self.virt_driver, do_check_attach=True
                            ).AndReturn(None)
        self.mox.ReplayAll()
        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
    def test_blank_attach_volume(self):
        """A blank BDM creates a new named blank volume, then attaches it."""
        no_blank_volume = self.blank_bdm.copy()
        no_blank_volume['volume_id'] = None
        test_bdm = self.driver_classes['blank'](no_blank_volume)
        instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
                                                   **{'uuid': 'fake-uuid'})
        volume_class = self.driver_classes['volume']
        volume = {'id': 'fake-volume-id-2',
                  'display_name': 'fake-uuid-blank-vol'}
        with contextlib.nested(
            mock.patch.object(self.volume_api, 'create', return_value=volume),
            mock.patch.object(volume_class, 'attach')
        ) as (vol_create, vol_attach):
            test_bdm.attach(self.context, instance, self.volume_api,
                            self.virt_driver)
            # The new volume is named "<instance uuid>-blank-vol".
            vol_create.assert_called_once_with(self.context,
                                               test_bdm.volume_size,
                                               'fake-uuid-blank-vol',
                                               '')
            vol_attach.assert_called_once_with(self.context, instance,
                                               self.volume_api,
                                               self.virt_driver,
                                               do_check_attach=True)
            self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
    def test_convert_block_devices(self):
        """_convert_block_devices keeps only BDMs the class can transform."""
        converted = driver_block_device._convert_block_devices(
            self.driver_classes['volume'],
            [self.volume_bdm, self.ephemeral_bdm])
        self.assertEqual(converted, [self.volume_driver_bdm])
    def test_legacy_block_devices(self):
        """legacy_block_devices emits legacy dicts, numbering ephemerals."""
        test_snapshot = self.driver_classes['snapshot'](
            self.snapshot_bdm)
        block_device_mapping = [test_snapshot, test_snapshot]
        legacy_bdm = driver_block_device.legacy_block_devices(
            block_device_mapping)
        self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
                                      self.snapshot_legacy_driver_bdm])
        # Ephemerals must get sequential virtual_name/num values.
        test_ephemerals = [self.driver_classes['ephemeral'](
            self.ephemeral_bdm) for _ in xrange(2)]
        expected = [self.ephemeral_legacy_driver_bdm.copy()
                    for _ in xrange(2)]
        expected[0]['virtual_name'] = 'ephemeral0'
        expected[0]['num'] = 0
        expected[1]['virtual_name'] = 'ephemeral1'
        expected[1]['num'] = 1
        legacy_ephemerals = driver_block_device.legacy_block_devices(
            test_ephemerals)
        self.assertEqual(expected, legacy_ephemerals)
    def test_get_swap(self):
        """get_swap returns the swap BDM, or None when no swap is present."""
        swap = [self.swap_driver_bdm]
        legacy_swap = [self.swap_legacy_driver_bdm]
        no_swap = [self.volume_driver_bdm]
        self.assertEqual(swap[0], driver_block_device.get_swap(swap))
        self.assertEqual(legacy_swap[0],
                         driver_block_device.get_swap(legacy_swap))
        self.assertIsNone(driver_block_device.get_swap(no_swap))
        self.assertIsNone(driver_block_device.get_swap([]))
    def test_is_implemented(self):
        """All fixture BDMs are implemented; a local-destination image is not."""
        for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
                    self.ephemeral_bdm, self.snapshot_bdm):
            self.assertTrue(driver_block_device.is_implemented(bdm))
        local_image = self.image_bdm.copy()
        local_image['destination_type'] = 'local'
        self.assertFalse(driver_block_device.is_implemented(local_image))
    def test_is_block_device_mapping(self):
        """Only volume-destination driver BDMs count as block device mappings."""
        test_swap = self.driver_classes['swap'](self.swap_bdm)
        test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
        test_image = self.driver_classes['image'](self.image_bdm)
        test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
        test_volume = self.driver_classes['volume'](self.volume_bdm)
        test_blank = self.driver_classes['blank'](self.blank_bdm)
        for bdm in (test_image, test_snapshot, test_volume, test_blank):
            self.assertTrue(driver_block_device.is_block_device_mapping(
                bdm._bdm_obj))
        for bdm in (test_swap, test_ephemeral):
            self.assertFalse(driver_block_device.is_block_device_mapping(
                bdm._bdm_obj))
| true | true |
f71439ce9d32a0f70a4540340143a4985060ff8f | 8,950 | py | Python | algorithm/RL/DDPG.py | 915288938lx/Personae-master-01 | 0885c37956bd3f9157c66109e09755a51ad5d3a1 | [
"MIT"
] | null | null | null | algorithm/RL/DDPG.py | 915288938lx/Personae-master-01 | 0885c37956bd3f9157c66109e09755a51ad5d3a1 | [
"MIT"
] | null | null | null | algorithm/RL/DDPG.py | 915288938lx/Personae-master-01 | 0885c37956bd3f9157c66109e09755a51ad5d3a1 | [
"MIT"
] | null | null | null | # coding=utf-8
import tensorflow as tf
import numpy as np
import os
from algorithm import config
from base.env.market import Market
from checkpoints import CHECKPOINTS_DIR
from base.algorithm.model import BaseRLTFModel
from helper.args_parser import model_launcher_parser
from helper.data_logger import generate_algorithm_logger, generate_market_logger
class Algorithm(BaseRLTFModel):
    """DDPG (Deep Deterministic Policy Gradient) trading agent.

    Builds "predict" and "target" actor/critic networks; the targets are
    soft-updated towards the predict networks with rate ``tau``.
    Transitions are stored in a fixed-size replay buffer.
    """
    def __init__(self, session, env, a_space, s_space, **options):
        super(Algorithm, self).__init__(session, env, a_space, s_space, **options)
        self.actor_loss, self.critic_loss = .0, .0
        # Initialize buffer. One row per transition: (s, a, r, s_next).
        self.buffer = np.zeros((self.buffer_size, self.s_space * 2 + 1 + 1))
        self.buffer_length = 0
        self._init_input()
        self._init_nn()
        self._init_op()
        self._init_saver()
        self._init_summary_writer()
    def _init_input(self):
        # Placeholders for state, reward and next state batches.
        self.s = tf.placeholder(tf.float32, [None, self.s_space], 'state')
        self.r = tf.placeholder(tf.float32, [None, 1], 'reward')
        self.s_next = tf.placeholder(tf.float32, [None, self.s_space], 'state_next')
    def _init_nn(self):
        # Initialize predict actor and critic (trainable).
        self.a_predict = self.__build_actor_nn(self.s, "predict/actor", trainable=True)
        self.q_predict = self.__build_critic(self.s, self.a_predict, "predict/critic", trainable=True)
        # Initialize target actor and critic (frozen, soft-updated).
        self.a_next = self.__build_actor_nn(self.s_next, "target/actor", trainable=False)
        self.q_next = self.__build_critic(self.s_next, self.a_next, "target/critic", trainable=False)
        # Save scopes for later variable collection.
        self.scopes = ["predict/actor", "target/actor", "predict/critic", "target/critic"]
    def _init_op(self):
        # Get actor and critic parameters, one list per scope.
        params = [tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) for scope in self.scopes]
        zipped_a_params, zipped_c_params = zip(params[0], params[1]), zip(params[2], params[3])
        # Soft-update ops for the targets: t <- (1 - tau) * t + tau * p.
        self.update_a = [tf.assign(t_a, (1 - self.tau) * t_a + self.tau * p_a) for p_a, t_a in zipped_a_params]
        self.update_c = [tf.assign(t_c, (1 - self.tau) * t_c + self.tau * p_c) for p_c, t_c in zipped_c_params]
        # Actor loss and train op: maximize the predicted Q-value.
        with tf.variable_scope('actor_loss'):
            self.a_loss = -tf.reduce_mean(self.q_predict)
        with tf.variable_scope('actor_train'):
            self.a_train_op = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.a_loss, var_list=params[0])
        # Critic loss and train op: regress Q towards the TD target.
        self.q_target = self.r + self.gamma * self.q_next
        with tf.variable_scope('critic_loss'):
            self.c_loss = tf.losses.mean_squared_error(self.q_target, self.q_predict)
        with tf.variable_scope('critic_train'):
            self.c_train_op = tf.train.RMSPropOptimizer(self.learning_rate * 2).minimize(self.c_loss, var_list=params[2])
        # Initialize variables.
        self.session.run(tf.global_variables_initializer())
    def run(self):
        """Train for ``self.episodes`` episodes, or restore a checkpoint."""
        if self.mode != 'train':
            self.restore()
        else:
            for episode in range(self.episodes):
                self.log_loss(episode)
                s = self.env.reset(self.mode)
                while True:
                    c, a, a_index = self.predict(s)
                    s_next, r, status, info = self.env.forward(c, a)
                    self.save_transition(s, a_index, r, s_next)
                    self.train()
                    s = s_next
                    if status == self.env.Done:
                        self.env.trader.log_asset(episode)
                        break
                # Checkpoint every 10 episodes.
                if self.enable_saver and episode % 10 == 0:
                    self.save(episode)
    def train(self):
        """Run one soft-update plus one critic and one actor step."""
        # Train only once the replay buffer has been filled.
        if self.buffer_length < self.buffer_size:
            return
        self.session.run([self.update_a, self.update_c])
        s, a, r, s_next = self.get_transition_batch()
        self.critic_loss, _ = self.session.run([self.c_loss, self.c_train_op], {self.s: s, self.a_predict: a, self.r: r, self.s_next: s_next})
        self.actor_loss, _ = self.session.run([self.a_loss, self.a_train_op], {self.s: s})
    def predict(self, s):
        """Return (stock code, action, action index) for state ``s``."""
        a = self.session.run(self.a_predict, {self.s: s})[0][0]
        # Exploration (probabilistic choice) is enabled only in train mode.
        return self.get_stock_code_and_action(a, use_greedy=True, use_prob=True if self.mode == 'train' else False)
    def save_transition(self, s, a, r, s_next):
        """Append (s, a, r, s_next) to the circular replay buffer."""
        transition = np.hstack((s, [[a]], [[r]], s_next))
        self.buffer[self.buffer_length % self.buffer_size, :] = transition
        self.buffer_length += 1
    def get_transition_batch(self):
        """Sample a random mini-batch and split it into (s, a, r, s_next)."""
        indices = np.random.choice(self.buffer_size, size=self.batch_size)
        batch = self.buffer[indices, :]
        s = batch[:, :self.s_space]
        a = batch[:, self.s_space: self.s_space + 1]
        r = batch[:, -self.s_space - 1: -self.s_space]
        s_next = batch[:, -self.s_space:]
        return s, a, r, s_next
    def log_loss(self, episode):
        """Log the current actor and critic losses for ``episode``."""
        self.logger.warning("Episode: {0} | Actor Loss: {1:.2f} | Critic Loss: {2:.2f}".format(episode,
                                                                                               self.actor_loss,
                                                                                               self.critic_loss))
    def __build_actor_nn(self, state, scope, trainable=True):
        """Build the actor network: state -> action in [0, a_space - 1]."""
        w_init, b_init = tf.random_normal_initializer(.0, .001), tf.constant_initializer(.1)
        with tf.variable_scope(scope):
            # state is ? * code_count * data_dim.
            first_dense = tf.layers.dense(state,
                                          64,
                                          tf.nn.relu,
                                          kernel_initializer=w_init,
                                          bias_initializer=b_init,
                                          trainable=trainable)
            action = tf.layers.dense(first_dense,
                                     1,
                                     tf.nn.sigmoid,
                                     kernel_initializer=w_init,
                                     bias_initializer=b_init,
                                     trainable=trainable)
            # Scale the sigmoid output onto the discrete action range.
            return tf.multiply(action, self.a_space - 1)
    @staticmethod
    def __build_critic(state, action, scope, trainable=True):
        """Build the critic network: (state, action) -> scalar Q-value."""
        w_init, b_init = tf.random_normal_initializer(.0, .3), tf.constant_initializer(.1)
        with tf.variable_scope(scope):
            s_first_dense = tf.layers.dense(state,
                                            32,
                                            tf.nn.relu,
                                            kernel_initializer=w_init,
                                            bias_initializer=b_init,
                                            trainable=trainable)
            a_first_dense = tf.layers.dense(action,
                                            32,
                                            tf.nn.relu,
                                            kernel_initializer=w_init,
                                            bias_initializer=b_init,
                                            trainable=trainable)
            q_value = tf.layers.dense(tf.nn.relu(s_first_dense + a_first_dense),
                                      1,
                                      kernel_initializer=w_init,
                                      bias_initializer=b_init,
                                      trainable=trainable)
            return q_value
def main(args):
    """Build the market environment and DDPG agent, then run it.

    :param args: parsed CLI namespace providing ``mode``, ``codes``,
        ``market``, ``episode`` and ``training_data_ratio``.
    """
    # Removed dead commented-out parameter overrides; all values now come
    # exclusively from the CLI arguments.
    mode = args.mode
    codes = args.codes
    market = args.market
    episode = args.episode
    training_data_ratio = args.training_data_ratio
    # Model name (e.g. "DDPG") derived from this file's name.
    model_name = os.path.splitext(os.path.basename(__file__))[0]
    env = Market(codes, start_date="2012-01-01", end_date="2019-07-19", **{
        "market": market,
        "logger": generate_market_logger(model_name),
        "training_data_ratio": training_data_ratio,
    })
    algorithm = Algorithm(tf.Session(config=config), env, env.trader.action_space, env.data_dim, **{
        "mode": mode,
        "episodes": episode,
        "enable_saver": True,
        "learning_rate": 0.003,
        "enable_summary_writer": True,
        "logger": generate_algorithm_logger(model_name),
        "save_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, market, "model"),
        "summary_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, market, "summary"),
    })
    # Train (or restore), then evaluate and plot the results.
    algorithm.run()
    algorithm.eval()
    algorithm.plot()
# Script entry point: parse CLI arguments and launch the model.
if __name__ == '__main__':
    main(model_launcher_parser.parse_args())
| 42.018779 | 142 | 0.557765 |
import tensorflow as tf
import numpy as np
import os
from algorithm import config
from base.env.market import Market
from checkpoints import CHECKPOINTS_DIR
from base.algorithm.model import BaseRLTFModel
from helper.args_parser import model_launcher_parser
from helper.data_logger import generate_algorithm_logger, generate_market_logger
class Algorithm(BaseRLTFModel):
def __init__(self, session, env, a_space, s_space, **options):
super(Algorithm, self).__init__(session, env, a_space, s_space, **options)
self.actor_loss, self.critic_loss = .0, .0
self.buffer = np.zeros((self.buffer_size, self.s_space * 2 + 1 + 1))
self.buffer_length = 0
self._init_input()
self._init_nn()
self._init_op()
self._init_saver()
self._init_summary_writer()
def _init_input(self):
self.s = tf.placeholder(tf.float32, [None, self.s_space], 'state')
self.r = tf.placeholder(tf.float32, [None, 1], 'reward')
self.s_next = tf.placeholder(tf.float32, [None, self.s_space], 'state_next')
def _init_nn(self):
self.a_predict = self.__build_actor_nn(self.s, "predict/actor", trainable=True)
self.q_predict = self.__build_critic(self.s, self.a_predict, "predict/critic", trainable=True)
self.a_next = self.__build_actor_nn(self.s_next, "target/actor", trainable=False)
self.q_next = self.__build_critic(self.s_next, self.a_next, "target/critic", trainable=False)
self.scopes = ["predict/actor", "target/actor", "predict/critic", "target/critic"]
def _init_op(self):
params = [tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) for scope in self.scopes]
zipped_a_params, zipped_c_params = zip(params[0], params[1]), zip(params[2], params[3])
self.update_a = [tf.assign(t_a, (1 - self.tau) * t_a + self.tau * p_a) for p_a, t_a in zipped_a_params]
self.update_c = [tf.assign(t_c, (1 - self.tau) * t_c + self.tau * p_c) for p_c, t_c in zipped_c_params]
with tf.variable_scope('actor_loss'):
self.a_loss = -tf.reduce_mean(self.q_predict)
with tf.variable_scope('actor_train'):
self.a_train_op = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.a_loss, var_list=params[0])
self.q_target = self.r + self.gamma * self.q_next
with tf.variable_scope('critic_loss'):
self.c_loss = tf.losses.mean_squared_error(self.q_target, self.q_predict)
with tf.variable_scope('critic_train'):
self.c_train_op = tf.train.RMSPropOptimizer(self.learning_rate * 2).minimize(self.c_loss, var_list=params[2])
self.session.run(tf.global_variables_initializer())
def run(self):
if self.mode != 'train':
self.restore()
else:
for episode in range(self.episodes):
self.log_loss(episode)
s = self.env.reset(self.mode)
while True:
c, a, a_index = self.predict(s)
s_next, r, status, info = self.env.forward(c, a)
self.save_transition(s, a_index, r, s_next)
self.train()
s = s_next
if status == self.env.Done:
self.env.trader.log_asset(episode)
break
if self.enable_saver and episode % 10 == 0:
self.save(episode)
def train(self):
if self.buffer_length < self.buffer_size:
return
self.session.run([self.update_a, self.update_c])
s, a, r, s_next = self.get_transition_batch()
self.critic_loss, _ = self.session.run([self.c_loss, self.c_train_op], {self.s: s, self.a_predict: a, self.r: r, self.s_next: s_next})
self.actor_loss, _ = self.session.run([self.a_loss, self.a_train_op], {self.s: s})
def predict(self, s):
a = self.session.run(self.a_predict, {self.s: s})[0][0]
return self.get_stock_code_and_action(a, use_greedy=True, use_prob=True if self.mode == 'train' else False)
def save_transition(self, s, a, r, s_next):
transition = np.hstack((s, [[a]], [[r]], s_next))
self.buffer[self.buffer_length % self.buffer_size, :] = transition
self.buffer_length += 1
def get_transition_batch(self):
indices = np.random.choice(self.buffer_size, size=self.batch_size)
batch = self.buffer[indices, :]
s = batch[:, :self.s_space]
a = batch[:, self.s_space: self.s_space + 1]
r = batch[:, -self.s_space - 1: -self.s_space]
s_next = batch[:, -self.s_space:]
return s, a, r, s_next
def log_loss(self, episode):
    """Log the most recent actor and critic losses for `episode`."""
    # warning level is used so the message shows under the default log config.
    self.logger.warning("Episode: {0} | Actor Loss: {1:.2f} | Critic Loss: {2:.2f}".format(episode,
                                                                                           self.actor_loss,
                                                                                           self.critic_loss))
def __build_actor_nn(self, state, scope, trainable=True):
    """Build the actor network: state -> continuous action value.

    Args:
        state: state placeholder/tensor fed into the network.
        scope: tf variable scope name for this copy of the network.
        trainable: pass False for a target copy so its variables are only
            changed by external (soft-update) assignments.

    Returns:
        Tensor of shape (batch, 1) scaled into [0, a_space - 1].
    """
    w_init, b_init = tf.random_normal_initializer(.0, .001), tf.constant_initializer(.1)
    with tf.variable_scope(scope):
        # One hidden ReLU layer; sigmoid keeps the raw output in (0, 1).
        first_dense = tf.layers.dense(state,
                                      64,
                                      tf.nn.relu,
                                      kernel_initializer=w_init,
                                      bias_initializer=b_init,
                                      trainable=trainable)
        action = tf.layers.dense(first_dense,
                                 1,
                                 tf.nn.sigmoid,
                                 kernel_initializer=w_init,
                                 bias_initializer=b_init,
                                 trainable=trainable)
    # Scale (0, 1) up to the discrete action range [0, a_space - 1].
    return tf.multiply(action, self.a_space - 1)
@staticmethod
def __build_critic(state, action, scope, trainable=True):
    """Build the critic network: (state, action) -> Q-value.

    Args:
        state: state placeholder/tensor.
        action: action tensor (e.g. the actor's output).
        scope: tf variable scope name for this copy of the network.
        trainable: pass False for a target copy (updated only externally).

    Returns:
        Q-value tensor of shape (batch, 1).
    """
    w_init, b_init = tf.random_normal_initializer(.0, .3), tf.constant_initializer(.1)
    with tf.variable_scope(scope):
        # State and action are embedded separately, then fused additively
        # before the final linear Q head.
        s_first_dense = tf.layers.dense(state,
                                        32,
                                        tf.nn.relu,
                                        kernel_initializer=w_init,
                                        bias_initializer=b_init,
                                        trainable=trainable)
        a_first_dense = tf.layers.dense(action,
                                        32,
                                        tf.nn.relu,
                                        kernel_initializer=w_init,
                                        bias_initializer=b_init,
                                        trainable=trainable)
        q_value = tf.layers.dense(tf.nn.relu(s_first_dense + a_first_dense),
                                  1,
                                  kernel_initializer=w_init,
                                  bias_initializer=b_init,
                                  trainable=trainable)
    return q_value
def main(args):
    """Build the market environment and the RL agent, then train/evaluate.

    Args:
        args: parsed CLI namespace with mode, codes, market, episode and
            training_data_ratio fields.
    """
    # Model name is the script's own file name without extension.
    model_name = os.path.basename(__file__).split('.')[0]

    env = Market(args.codes, start_date="2012-01-01", end_date="2019-07-19", **{
        "market": args.market,
        "logger": generate_market_logger(model_name),
        "training_data_ratio": args.training_data_ratio,
    })

    algorithm = Algorithm(tf.Session(config=config), env, env.trader.action_space, env.data_dim, **{
        "mode": args.mode,
        "episodes": args.episode,
        "enable_saver": True,
        "learning_rate": 0.003,
        "enable_summary_writer": True,
        "logger": generate_algorithm_logger(model_name),
        "save_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, args.market, "model"),
        "summary_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, args.market, "summary"),
    })

    algorithm.run()
    algorithm.eval()
    algorithm.plot()
# Entry point: parse CLI arguments and launch the model.
if __name__ == '__main__':
    main(model_launcher_parser.parse_args())
| true | true |
f7143a6df31b6e88eabff6f5aaf40943f677d15c | 6,824 | py | Python | pynotify/__init__.py | dhgrs/pynotify | 5bdfb0108466b7779f5bb7643b272c96f05c6f7c | [
"MIT"
] | null | null | null | pynotify/__init__.py | dhgrs/pynotify | 5bdfb0108466b7779f5bb7643b272c96f05c6f7c | [
"MIT"
] | null | null | null | pynotify/__init__.py | dhgrs/pynotify | 5bdfb0108466b7779f5bb7643b272c96f05c6f7c | [
"MIT"
] | null | null | null | import subprocess
class NotificationError(Exception):
    # Raised when a notification attribute is set to a value of the wrong
    # type, or (dispatch code) when the platform is unsupported.
    pass
class BaseNotification:
    """Common base for notifications: typed setters plus the notify() hook."""

    def set_typed_variable(self, value, specified_type):
        """Return `value` if it is an instance of `specified_type`, else raise."""
        if not isinstance(value, specified_type):
            raise NotificationError(
                'can only set '
                f'{specified_type.__name__} '
                f'(not "{value.__class__.__name__}")'
            )
        return value

    def notify(self):
        """Deliver the notification; concrete subclasses must override."""
        raise NotImplementedError()
class OSSpecificNotification(BaseNotification):
    '''
    OSSpecificNotification:
        A notification dispatched to an OS-specific handler based on
        platform.system().
    '''

    def __init__(self):
        import platform
        self.system = platform.system()

    # macOS handler
    def darwin_notify(self):
        raise NotImplementedError()

    # Linux handler
    def linux_notify(self):
        raise NotImplementedError()

    # Windows handler
    def windows_notify(self):
        raise NotImplementedError()

    # Dispatch to the handler for the current OS.
    def notify(self):
        if self.system == 'Darwin':
            self.darwin_notify()
        elif self.system == 'Linux':
            self.linux_notify()
        elif self.system == 'Windows':
            self.windows_notify()
        else:
            # BUG FIX: the exception was previously constructed but never
            # raised, so unsupported platforms failed silently; the message
            # also said the opposite of what it meant.
            raise NotificationError(f'{self.system} is not a supported system')
class MessageNotification(BaseNotification):
    '''
    MessageNotification:
        A notification that carries a free-form text message.

    Args:
        message(str): the message body
    '''

    def __init__(self, message):
        self._message = None
        self.set_message(message)

    # Accessors backing the `message` property (str-validated).
    def get_message(self):
        return self._message

    def set_message(self, message):
        self._message = self.set_typed_variable(message, str)

    message = property(get_message, set_message)
class WebhookNotification(MessageNotification):
    '''
    WebhookNotification:
        A message notification delivered through a webhook.

    Args:
        message(str): the message body
        url(str): the webhook URL
    '''

    def __init__(self, message, url):
        super().__init__(message)
        self._url = None
        self.set_url(url)

    # Accessors backing the `url` property (str-validated).
    def get_url(self):
        return self._url

    def set_url(self, url):
        self._url = self.set_typed_variable(url, str)

    url = property(get_url, set_url)
class TokenNotification(MessageNotification):
    '''
    TokenNotification:
        A message notification authenticated with an access token.

    Args:
        message(str): the message body
        token(str): the access token
    '''

    def __init__(self, message, token):
        super().__init__(message)
        self._token = None
        self.set_token(token)

    # Accessors backing the `token` property (str-validated).
    def get_token(self):
        return self._token

    def set_token(self, token):
        self._token = self.set_typed_variable(token, str)

    token = property(get_token, set_token)
class BeepNotification(OSSpecificNotification):
    """Notify by sounding the system bell a given number of times.

    Args:
        times (int): how many beeps to play.
    """

    def __init__(self, times):
        super().__init__()
        self._times = None
        self.set_times(times)

    # Accessors backing the ``times`` property (int-validated).
    def get_times(self):
        return self._times

    def set_times(self, times):
        self._times = self.set_typed_variable(times, int)

    times = property(get_times, set_times)

    def darwin_notify(self):
        # AppleScript's `beep N` plays the alert sound N times.
        subprocess.run(['osascript', '-e', f'beep {self._times}'])

    def linux_notify(self):
        import time
        for _ in range(self._times):
            # Pause first so consecutive bells are heard as separate beeps.
            time.sleep(0.5)
            subprocess.run(['xkbbell'])
class CenterNotification(MessageNotification):
    """Notify via the macOS Notification Center (osascript).

    Args:
        message (str): body text.
        title (str): optional title.
        subtitle (str): optional subtitle.
        sound (bool): whether to play the alert sound.
    """

    def __init__(self, message, title=None, subtitle=None, sound=True):
        super().__init__(message)
        self._title = None
        self._subtitle = None
        self._sound = None
        if title:
            self.set_title(title)
        if subtitle:
            self.set_subtitle(subtitle)
        if sound:
            self.set_sound(sound)

    # Accessors backing the ``title`` property.
    def get_title(self):
        return self._title

    def set_title(self, title):
        self._title = self.set_typed_variable(title, str)
        # osascript needs both title and subtitle; if only one was set,
        # blank out the other so the command stays well-formed.
        if not self._subtitle:
            self._subtitle = ' '

    title = property(get_title, set_title)

    # Accessors backing the ``subtitle`` property.
    def get_subtitle(self):
        return self._subtitle

    def set_subtitle(self, subtitle):
        self._subtitle = self.set_typed_variable(subtitle, str)
        if not self._title:
            self._title = ' '

    subtitle = property(get_subtitle, set_subtitle)

    # Accessors backing the ``sound`` property.
    def get_sound(self):
        return self._sound

    def set_sound(self, sound):
        self._sound = self.set_typed_variable(sound, bool)

    sound = property(get_sound, set_sound)

    def notify(self):
        """Assemble and run the `display notification` AppleScript command."""
        body = f'display notification \"{self._message}\"'
        if self._title and self._subtitle:
            heading = f'with title \"{self._title}\" subtitle \"{self._subtitle}\"'
        else:
            heading = ''
        chime = 'sound name \"\"' if self._sound else ''
        subprocess.run(['osascript', '-e', ' '.join([body, heading, chime])])
class SlackNotification(WebhookNotification):
    """Notify a Slack channel through an Incoming Webhook.

    Args:
        message (str): body text.
        url (str): Incoming Webhook URL.
    """

    def notify(self):
        import json
        import requests
        # Slack expects the body under the "text" key, JSON-encoded.
        requests.post(self._url, data=json.dumps({'text': self._message}))
class DiscordNotification(WebhookNotification):
    """Notify a Discord channel through a webhook.

    Args:
        message (str): body text.
        url (str): Discord webhook URL.
    """

    def notify(self):
        import json
        import requests
        # Discord expects the body under the "content" key, JSON-encoded,
        # with an explicit JSON content type.
        payload = json.dumps({'content': self._message})
        requests.post(
            self._url,
            headers={'Content-Type': 'application/json'},
            data=payload
        )
class LineNotification(TokenNotification):
    """Notify via the LINE Notify REST API.

    Args:
        message (str): body text.
        token (str): LINE Notify access token.
    """

    def __init__(self, message, token):
        super().__init__(message, token)
        # Fixed LINE Notify endpoint.
        self.URL = 'https://notify-api.line.me/api/notify'

    def notify(self):
        import requests
        # LINE Notify authenticates with a bearer-token header.
        requests.post(
            self.URL,
            headers={'Authorization': f'Bearer {self._token}'},
            params={'message': self._message}
        )
| 24.028169 | 75 | 0.592175 | import subprocess
class NotificationError(Exception):
    # Raised when a notification attribute gets a value of the wrong type.
    pass
class BaseNotification:
    # Common base: typed attribute validation plus the notify() hook.

    def set_typed_variable(self, value, specified_type):
        # Return `value` unchanged when it matches `specified_type`;
        # otherwise reject it with a NotificationError.
        if isinstance(value, specified_type):
            return value
        else:
            raise NotificationError(
                'can only set '
                f'{specified_type.__name__} '
                f'(not "{value.__class__.__name__}")'
            )

    def notify(self):
        # Concrete subclasses implement the actual delivery.
        raise NotImplementedError()
class OSSpecificNotification(BaseNotification):
    """Notification dispatched to an OS-specific handler via platform.system()."""

    def __init__(self):
        import platform
        self.system = platform.system()

    # Per-OS handlers; subclasses override the ones they support.
    def darwin_notify(self):
        raise NotImplementedError()

    def linux_notify(self):
        raise NotImplementedError()

    def windows_notify(self):
        raise NotImplementedError()

    def notify(self):
        """Dispatch to the handler matching the current OS."""
        if self.system == 'Darwin':
            self.darwin_notify()
        elif self.system == 'Linux':
            self.linux_notify()
        elif self.system == 'Windows':
            self.windows_notify()
        else:
            # BUG FIX: the exception was constructed but never raised, so
            # unsupported platforms failed silently; the message also said
            # the opposite of what it meant.
            raise NotificationError(f'{self.system} is not a supported system')
class MessageNotification(BaseNotification):
    # Notification carrying a free-form text message body.

    def __init__(self, message):
        self._message = None
        self.set_message(message)

    # Accessors backing the `message` property (str-validated).
    def get_message(self):
        return self._message

    def set_message(self, message):
        self._message = self.set_typed_variable(message, str)

    message = property(get_message, set_message)
class WebhookNotification(MessageNotification):
    # Message notification delivered through a webhook URL.

    def __init__(self, message, url):
        super().__init__(message)
        self._url = None
        self.set_url(url)

    # Accessors backing the `url` property (str-validated).
    def get_url(self):
        return self._url

    def set_url(self, url):
        self._url = self.set_typed_variable(url, str)

    url = property(get_url, set_url)
class TokenNotification(MessageNotification):
    # Message notification authenticated with an access token.

    def __init__(self, message, token):
        super().__init__(message)
        self._token = None
        self.set_token(token)

    # Accessors backing the `token` property (str-validated).
    def get_token(self):
        return self._token

    def set_token(self, token):
        self._token = self.set_typed_variable(token, str)

    token = property(get_token, set_token)
class BeepNotification(OSSpecificNotification):
    # Notify by sounding the system bell `times` times.

    def __init__(self, times):
        super().__init__()
        self._times = None
        self.set_times(times)

    # Accessors backing the `times` property (int-validated).
    def get_times(self):
        return self._times

    def set_times(self, times):
        self._times = self.set_typed_variable(times, int)

    times = property(get_times, set_times)

    def darwin_notify(self):
        # AppleScript `beep N` plays the alert sound N times.
        cmd = ['osascript', '-e', f'beep {self._times}']
        subprocess.run(cmd)

    def linux_notify(self):
        import time
        for _ in range(self._times):
            cmd = ['xkbbell']
            # Pause so consecutive bells are heard as separate beeps.
            time.sleep(0.5)
            subprocess.run(cmd)
class CenterNotification(MessageNotification):
    # Notify via the macOS Notification Center using osascript.
    # Optional title/subtitle (str) and sound (bool) flags.

    def __init__(self, message, title=None, subtitle=None, sound=True):
        super().__init__(message)
        self._title = None
        self._subtitle = None
        self._sound = None
        if title:
            self.set_title(title)
        if subtitle:
            self.set_subtitle(subtitle)
        if sound:
            self.set_sound(sound)

    # Accessors backing the `title` property.
    def get_title(self):
        return self._title

    def set_title(self, title):
        self._title = self.set_typed_variable(title, str)
        # osascript needs both title and subtitle; when only one is set,
        # blank the other so the command stays well-formed.
        if not self._subtitle:
            self._subtitle = ' '

    title = property(get_title, set_title)

    # Accessors backing the `subtitle` property.
    def get_subtitle(self):
        return self._subtitle

    def set_subtitle(self, subtitle):
        self._subtitle = self.set_typed_variable(subtitle, str)
        if not self._title:
            self._title = ' '

    subtitle = property(get_subtitle, set_subtitle)

    # Accessors backing the `sound` property.
    def get_sound(self):
        return self._sound

    def set_sound(self, sound):
        self._sound = self.set_typed_variable(sound, bool)

    sound = property(get_sound, set_sound)

    def notify(self):
        # Assemble and run the `display notification` AppleScript command.
        _message = f'display notification \"{self._message}\"'
        _title = \
            f'with title \"{self._title}\" subtitle \"{self._subtitle}\"' \
            if self._title and self._subtitle else ''
        _sound = 'sound name \"\"' if self._sound else ''
        cmd = ['osascript', '-e', f'{_message} {_title} {_sound}']
        subprocess.run(cmd)
class SlackNotification(WebhookNotification):
    # Deliver the message to Slack through an Incoming Webhook.

    def notify(self):
        import json
        import requests
        # Slack expects the body under the "text" key, JSON-encoded.
        data = {'text': self._message}
        requests.post(self._url, data=json.dumps(data))
class DiscordNotification(WebhookNotification):
    # Deliver the message to a Discord channel webhook.

    def notify(self):
        import json
        import requests
        # Discord expects the body under the "content" key as JSON.
        data = {'content': self._message}
        requests.post(
            self._url,
            headers={'Content-Type': 'application/json'},
            data=json.dumps(data)
        )
class LineNotification(TokenNotification):
    # Deliver the message through the LINE Notify REST API.

    def __init__(self, message, token):
        super().__init__(message, token)
        # Fixed LINE Notify endpoint.
        self.URL = 'https://notify-api.line.me/api/notify'

    def notify(self):
        import requests
        # LINE Notify authenticates with a bearer-token header.
        headers = {'Authorization': f'Bearer {self._token}'}
        params = {'message': self._message}
        requests.post(
            self.URL,
            headers=headers,
            params=params
        )
| true | true |
f7143a7938cf66264f124bc702bc410c903aa5bf | 147 | py | Python | FastAPISQLAlchamyGraphQL/app/mutations/__init__.py | scionoftech/FastAPI-Full-Stack-Samples | e7d42661ed59324ff20f419d05c6cd1e7dab7e97 | [
"MIT"
] | 29 | 2021-03-31T02:42:59.000Z | 2022-03-12T16:20:05.000Z | FastAPIMongoEngineGraphQL/app/mutations/__init__.py | scionoftech/FastAPI-Full-Stack-Samples | e7d42661ed59324ff20f419d05c6cd1e7dab7e97 | [
"MIT"
] | null | null | null | FastAPIMongoEngineGraphQL/app/mutations/__init__.py | scionoftech/FastAPI-Full-Stack-Samples | e7d42661ed59324ff20f419d05c6cd1e7dab7e97 | [
"MIT"
] | 4 | 2021-08-21T01:02:00.000Z | 2022-01-09T15:33:51.000Z | from .user import CreateUser, AuthUser, UpdateUser, DeleteUser, UpdatePassword
from .articles import CreateArticle, UpdateArticle, DeleteArticle
| 49 | 79 | 0.836735 | from .user import CreateUser, AuthUser, UpdateUser, DeleteUser, UpdatePassword
from .articles import CreateArticle, UpdateArticle, DeleteArticle
| true | true |
f7143afde7eec54cc467e2279b80d92472d6fb74 | 319 | py | Python | bitly_api/__init__.py | galeone/bitly-api-python | 162add496ba2b42675b36581178902cce516cdf7 | [
"Apache-2.0"
] | 3 | 2018-08-29T08:53:57.000Z | 2019-02-22T19:56:11.000Z | bitly_api/__init__.py | galeone/bitly-api-python | 162add496ba2b42675b36581178902cce516cdf7 | [
"Apache-2.0"
] | null | null | null | bitly_api/__init__.py | galeone/bitly-api-python | 162add496ba2b42675b36581178902cce516cdf7 | [
"Apache-2.0"
] | 1 | 2019-06-28T20:30:47.000Z | 2019-06-28T20:30:47.000Z | from .bitly_api import Connection, BitlyError, Error
# Package version / authorship metadata.
__version__ = '0.3'
__author__ = "Jehiah Czebotar <jehiah@gmail.com>"
# Names exported by `from bitly_api import *`.
__all__ = ["Connection", "BitlyError", "Error"]
__doc__ = """
This is a python library for the bitly api
all methods raise BitlyError on an unexpected response, or a problem with input
format
"""
| 31.9 | 79 | 0.752351 | from .bitly_api import Connection, BitlyError, Error
# Package version / authorship metadata.
__version__ = '0.3'
__author__ = "Jehiah Czebotar <jehiah@gmail.com>"
# Names exported by `from bitly_api import *`.
__all__ = ["Connection", "BitlyError", "Error"]
__doc__ = """
This is a python library for the bitly api
all methods raise BitlyError on an unexpected response, or a problem with input
format
"""
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.