hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7283bdb2ea008a0af45944cac9adf39b2d9a8cf | 2,791 | py | Python | src/third_party/beaengine/tests/0f3a67.py | CrackerCat/rp | 5fe693c26d76b514efaedb4084f6e37d820db023 | [
"MIT"
] | 1 | 2022-01-17T17:40:29.000Z | 2022-01-17T17:40:29.000Z | src/third_party/beaengine/tests/0f3a67.py | CrackerCat/rp | 5fe693c26d76b514efaedb4084f6e37d820db023 | [
"MIT"
] | null | null | null | src/third_party/beaengine/tests/0f3a67.py | CrackerCat/rp | 5fe693c26d76b514efaedb4084f6e37d820db023 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
    def test(self):
        """Check decoding of EVEX-encoded 0F3A 67 (vfpclassss / vfpclasssd).

        Each case pairs an EVEX prefix specification with the expected
        mnemonic and disassembly text; the encoded instruction bytes are
        identical apart from the prefix.
        """
        cases = [
            # EVEX.256.66.0F3A.W0 67 /r ib
            # vfpclassss ymm1{k1}{z}, ymm2, ymm3/m32, imm8
            ("EVEX.256.66.0F3A.W0", b"vfpclassss",
             "vfpclassss ymm28, ymm16, dword ptr [r8], 11h"),
            # EVEX.512.66.0F3A.W0 67 /r ib
            # vfpclassss zmm1{k1}{z}, zmm2, zmm3/m32, imm8
            ("EVEX.512.66.0F3A.W0", b"vfpclassss",
             "vfpclassss zmm28, zmm16, dword ptr [r8], 11h"),
            # EVEX.256.66.0F3A.W1 67 /r ib
            # vfpclasssd ymm1{k1}{z}, ymm2, ymm3/m64, imm8
            ("EVEX.256.66.0F3A.W1", b"vfpclasssd",
             "vfpclasssd ymm28, ymm16, qword ptr [r8], 11h"),
            # EVEX.512.66.0F3A.W1 67 /r ib
            # vfpclasssd zmm1{k1}{z}, zmm2, zmm3/m64, imm8
            ("EVEX.512.66.0F3A.W1", b"vfpclasssd",
             "vfpclasssd zmm28, zmm16, qword ptr [r8], 11h"),
        ]
        for spec, mnemonic, representation in cases:
            myEVEX = EVEX(spec)
            Buffer = bytes.fromhex("{}672011".format(myEVEX.prefix()))
            myDisasm = Disasm(Buffer)
            myDisasm.read()
            assert_equal(myDisasm.infos.Instruction.Opcode, 0x67)
            assert_equal(myDisasm.infos.Instruction.Mnemonic, mnemonic)
            assert_equal(myDisasm.repr(), representation)
| 40.449275 | 85 | 0.660337 |
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
myEVEX = EVEX('EVEX.256.66.0F3A.W0')
Buffer = bytes.fromhex('{}672011'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x67)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vfpclassss')
assert_equal(myDisasm.repr(), 'vfpclassss ymm28, ymm16, dword ptr [r8], 11h')
myEVEX = EVEX('EVEX.512.66.0F3A.W0')
Buffer = bytes.fromhex('{}672011'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x67)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vfpclassss')
assert_equal(myDisasm.repr(), 'vfpclassss zmm28, zmm16, dword ptr [r8], 11h')
myEVEX = EVEX('EVEX.256.66.0F3A.W1')
Buffer = bytes.fromhex('{}672011'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x67)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vfpclasssd')
assert_equal(myDisasm.repr(), 'vfpclasssd ymm28, ymm16, qword ptr [r8], 11h')
myEVEX = EVEX('EVEX.512.66.0F3A.W1')
Buffer = bytes.fromhex('{}672011'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x67)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vfpclasssd')
assert_equal(myDisasm.repr(), 'vfpclasssd zmm28, zmm16, qword ptr [r8], 11h')
| true | true |
f7283da9211db669f163190590b7288c219db82f | 28,506 | py | Python | xarray_sentinel/sentinel1.py | scottyhq/xarray-sentinel | 3899a86e5bf5d56454e7467d9231bc97ebab8fe1 | [
"Apache-2.0"
] | null | null | null | xarray_sentinel/sentinel1.py | scottyhq/xarray-sentinel | 3899a86e5bf5d56454e7467d9231bc97ebab8fe1 | [
"Apache-2.0"
] | null | null | null | xarray_sentinel/sentinel1.py | scottyhq/xarray-sentinel | 3899a86e5bf5d56454e7467d9231bc97ebab8fe1 | [
"Apache-2.0"
] | null | null | null | """Map Sentinel-1 data products to xarray.
References:
- Sentinel-1 document library: https://sentinels.copernicus.eu/web/sentinel/user-guides/sentinel-1-sar/document-library
- Sentinel-1 Product Specification v3.9 07 May 2021 S1-RS-MDA-52-7441-3-9 documenting IPF 3.40
https://sentinel.esa.int/documents/247904/1877131/S1-RS-MDA-52-7441-3-9-2_Sentinel-1ProductSpecification.pdf
- Sentinel-1 Product Specification v3.7 27 February 2020 S1-RS-MDA-52-7441 documenting IPF 3.30
https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Specification
"""
import contextlib
import os
import typing as T
import warnings
import fsspec
import numpy as np
import pandas as pd
import xarray as xr
from . import conventions, esa_safe
SPEED_OF_LIGHT = 299_792_458 # m / s
ONE_SECOND = np.timedelta64(1, "s")
def get_fs_path(
    urlpath_or_path: esa_safe.PathType,
    fs: T.Optional[fsspec.AbstractFileSystem] = None,
    storage_options: T.Optional[T.Dict[str, T.Any]] = None,
) -> T.Tuple[fsspec.AbstractFileSystem, str]:
    """Resolve *urlpath_or_path* into a concrete (filesystem, path) pair.

    Exactly one of ``fs`` and ``storage_options`` may be supplied; when
    ``fs`` is given the path is used verbatim, otherwise fsspec resolves
    the URL, which must match exactly one file or object.
    """
    if fs is not None and storage_options is not None:
        raise TypeError("only one of 'fs' and 'storage_options' can be not None")
    if fs is not None:
        return fs, str(urlpath_or_path)
    fs, _, paths = fsspec.get_fs_token_paths(
        urlpath_or_path, storage_options=storage_options
    )
    if len(paths) == 0:
        raise ValueError(f"file or object not found {urlpath_or_path!r}")
    elif len(paths) > 1:
        raise ValueError(f"multiple files or objects found {urlpath_or_path!r}")
    return fs, paths[0]
def normalise_group(group: T.Optional[str]) -> T.Tuple[str, T.Optional[int]]:
    """Split a group path into ``(group, burst_index)``.

    A trailing numeric component at depth two (e.g. ``"IW1/VV/3"``) is
    interpreted as a burst index; a leading slash is dropped.
    """
    normalised = group or ""
    if normalised.startswith("/"):
        normalised = normalised[1:]
    parent, _, leaf = normalised.rpartition("/")
    if leaf.isnumeric() and parent.count("/") == 1:
        return parent, int(leaf)
    return normalised, None
def open_calibration_dataset(calibration: esa_safe.PathType) -> xr.Dataset:
    """Build the calibration LUT dataset from the calibration annotation.

    Parameters
    ----------
    calibration : path to the calibration annotation XML file.

    Returns a dataset with ``sigmaNought``/``betaNought``/``gamma``/``dn``
    variables on a regular ``line`` x ``pixel`` grid plus the per-line
    ``azimuth_time``.

    Raises ``ValueError`` when the pixel grid is not identical across
    calibration vectors.
    """
    calibration_vectors = esa_safe.parse_tag_as_list(
        calibration, ".//calibrationVector", "calibration"
    )

    def parse_values(text: str, dtype: T.Any) -> np.ndarray:
        # np.fromstring with ``sep`` is deprecated; split + np.array is the
        # supported way to parse space-separated numbers
        return np.array(text.split(), dtype=dtype)

    azimuth_time_list = []
    pixel_list = []
    line_list = []
    sigmaNought_list = []
    betaNought_list = []
    gamma_list = []
    dn_list = []
    for vector in calibration_vectors:
        azimuth_time_list.append(vector["azimuthTime"])
        line_list.append(vector["line"])
        pixel_list.append(parse_values(vector["pixel"]["$"], int))
        sigmaNought_list.append(parse_values(vector["sigmaNought"]["$"], np.float32))
        betaNought_list.append(parse_values(vector["betaNought"]["$"], np.float32))
        gamma_list.append(parse_values(vector["gamma"]["$"], np.float32))
        dn_list.append(parse_values(vector["dn"]["$"], np.float32))
    pixel = np.array(pixel_list)
    if not np.allclose(pixel, pixel[0]):
        raise ValueError(
            "Unable to organise calibration vectors in a regular line-pixel grid"
        )
    data_vars = {
        "azimuth_time": ("line", [np.datetime64(dt) for dt in azimuth_time_list]),
        "sigmaNought": (("line", "pixel"), sigmaNought_list),
        "betaNought": (("line", "pixel"), betaNought_list),
        "gamma": (("line", "pixel"), gamma_list),
        "dn": (("line", "pixel"), dn_list),
    }
    coords = {"line": line_list, "pixel": pixel_list[0]}

    return xr.Dataset(data_vars=data_vars, coords=coords)
def open_noise_range_dataset(noise: esa_safe.PathType) -> xr.Dataset:
    """Build the range noise LUT dataset from the noise annotation.

    Returns ``noiseRangeLut`` on a regular ``line`` x ``pixel`` grid plus
    the per-line ``azimuth_time``; raises ``ValueError`` when the pixel
    grid differs between noise vectors.
    """
    noise_vectors = esa_safe.parse_tag_as_list(noise, ".//noiseRangeVector", "noise")

    azimuth_time_list = []
    pixel_list = []
    line_list = []
    noiseRangeLut_list = []
    for vector in noise_vectors:
        azimuth_time_list.append(vector["azimuthTime"])
        line_list.append(vector["line"])
        # np.fromstring with ``sep`` is deprecated; split + np.array is the
        # supported way to parse space-separated numbers
        pixel_list.append(np.array(vector["pixel"]["$"].split(), dtype=int))
        noiseRangeLut_list.append(
            np.array(vector["noiseRangeLut"]["$"].split(), dtype=np.float32)
        )
    pixel = np.array(pixel_list)
    if not np.allclose(pixel, pixel[0]):
        raise ValueError(
            "Unable to organise noise vectors in a regular line-pixel grid"
        )
    data_vars = {
        "azimuth_time": ("line", [np.datetime64(dt) for dt in azimuth_time_list]),
        "noiseRangeLut": (("line", "pixel"), noiseRangeLut_list),
    }
    coords = {"line": line_list, "pixel": pixel_list[0]}

    return xr.Dataset(data_vars=data_vars, coords=coords)
def open_noise_azimuth_dataset(noise: esa_safe.PathType) -> xr.Dataset:
    """Build the azimuth noise LUT dataset from the noise annotation.

    Returns an empty dataset when no azimuth noise vectors are present.
    """
    noise_vectors = esa_safe.parse_tag_as_list(noise, ".//noiseAzimuthVector", "noise")

    first_range_sample = []
    line_list = []
    noiseAzimuthLut_list = []
    for vector in noise_vectors:
        first_range_sample.append(vector["firstRangeSample"])
        # np.fromstring with ``sep`` is deprecated; split + np.array is the
        # supported way to parse space-separated numbers
        line_list.append(np.array(vector["line"]["$"].split(), dtype=int))
        noiseAzimuthLut_list.append(
            np.array(vector["noiseAzimuthLut"]["$"].split(), dtype=np.float32)
        )

    # BROKEN: GRDs have line and noiseAzimuthLut of different size, we take the first one
    data_vars = {}
    coords = {}
    if first_range_sample:
        data_vars["noiseAzimuthLut"] = ("line", noiseAzimuthLut_list[0])
        coords["line"] = line_list[0]
    return xr.Dataset(data_vars=data_vars, coords=coords)
def open_coordinate_conversion_dataset(
    annotation_path: esa_safe.PathType,
) -> xr.Dataset:
    """Read the slant-range <-> ground-range conversion polynomials.

    Returns an empty dataset when the annotation carries no
    coordinateConversion records.
    """
    coordinate_conversion = esa_safe.parse_tag_as_list(
        annotation_path, ".//coordinateConversionList/coordinateConversion"
    )
    gr0 = []
    sr0 = []
    azimuth_time = []
    slant_range_time = []
    srgr: T.List[T.List[float]] = []
    grsr: T.List[T.List[float]] = []
    for record in coordinate_conversion:
        sr0.append(record["sr0"])
        gr0.append(record["gr0"])
        azimuth_time.append(record["azimuthTime"])
        slant_range_time.append(record["slantRangeTime"])
        srgr.append([float(v) for v in record["srgrCoefficients"]["$"].split()])
        grsr.append([float(v) for v in record["grsrCoefficients"]["$"].split()])
    coords: T.Dict[str, T.Any] = {}
    data_vars: T.Dict[str, T.Any] = {}
    if srgr:
        coords = {
            "azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
            "degree": list(range(len(srgr[0]))),
        }
        data_vars = {
            "gr0": ("azimuth_time", gr0),
            "sr0": ("azimuth_time", sr0),
            "slant_range_time": ("azimuth_time", slant_range_time),
            "srgrCoefficients": (("azimuth_time", "degree"), srgr),
            "grsrCoefficients": (("azimuth_time", "degree"), grsr),
        }
    return xr.Dataset(data_vars=data_vars, coords=coords)
def open_gcp_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Arrange the geolocation grid points on an azimuth/slant-range grid.

    The annotation lists one point per (line, pixel) pair; the unique line
    and pixel values define the grid axes and each point fills one cell.
    """
    geolocation_grid_points = esa_safe.parse_tag_as_list(
        annotation, ".//geolocationGridPoint"
    )
    azimuth_times: T.List[T.Any] = []
    slant_range_times: T.List[T.Any] = []
    seen_lines: T.Set[T.Any] = set()
    seen_pixels: T.Set[T.Any] = set()
    # first pass: collect the grid axes in order of first appearance
    for point in geolocation_grid_points:
        if point["line"] not in seen_lines:
            azimuth_times.append(np.datetime64(point["azimuthTime"]))
            seen_lines.add(point["line"])
        if point["pixel"] not in seen_pixels:
            slant_range_times.append(point["slantRangeTime"])
            seen_pixels.add(point["pixel"])
    shape = (len(azimuth_times), len(slant_range_times))
    dims = ("azimuth_time", "slant_range_time")
    fields = ["latitude", "longitude", "height", "incidenceAngle", "elevationAngle"]
    arrays = {field: np.full(shape, np.nan) for field in fields}
    line = sorted(seen_lines)
    pixel = sorted(seen_pixels)
    # second pass: drop every point into its grid cell
    for point in geolocation_grid_points:
        j = line.index(point["line"])
        i = pixel.index(point["pixel"])
        for field in fields:
            arrays[field][j, i] = point[field]
    ds = xr.Dataset(
        data_vars={field: (dims, arrays[field]) for field in fields},
        coords={
            "azimuth_time": [np.datetime64(dt) for dt in azimuth_times],
            "slant_range_time": slant_range_times,
            "line": ("azimuth_time", line),
            "pixel": ("slant_range_time", pixel),
        },
    )
    return ds
def open_attitude_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read platform attitude records (quaternions, rates and Euler angles)."""
    attitudes = esa_safe.parse_tag_as_list(annotation, ".//attitude")

    variables = ["q0", "q1", "q2", "q3", "wx", "wy", "wz", "pitch", "roll", "yaw"]
    azimuth_time = [attitude["time"] for attitude in attitudes]
    data_vars: T.Dict[str, T.Any] = {
        var: ("azimuth_time", [attitude[var] for attitude in attitudes])
        for var in variables
    }
    return xr.Dataset(
        data_vars=data_vars,
        coords={"azimuth_time": [np.datetime64(dt) for dt in azimuth_time]},
    )
def open_orbit_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read orbit state vectors (position/velocity per azimuth time).

    The reference frame is exported as the ``reference_system`` attribute
    only when it is identical for every state vector; otherwise a warning
    is emitted and the attribute is omitted.
    """
    orbits = esa_safe.parse_tag_as_list(annotation, ".//orbit")
    reference_system = orbits[0]["frame"]

    azimuth_time: T.List[T.Any] = []
    position_data: T.List[T.List[T.Any]] = [[], [], []]
    velocity_data: T.List[T.List[T.Any]] = [[], [], []]
    for orbit in orbits:
        azimuth_time.append(orbit["time"])
        for axis, name in enumerate("xyz"):
            position_data[axis].append(orbit["position"][name])
            velocity_data[axis].append(orbit["velocity"][name])
        if orbit["frame"] != reference_system:
            warnings.warn(
                "reference_system is not consistent in all the state vectors. "
            )
            reference_system = None

    attrs = {} if reference_system is None else {"reference_system": reference_system}
    position = xr.Variable(data=position_data, dims=("axis", "azimuth_time"))  # type: ignore
    velocity = xr.Variable(data=velocity_data, dims=("axis", "azimuth_time"))  # type: ignore
    return xr.Dataset(
        data_vars={"position": position, "velocity": velocity},
        attrs=attrs,
        coords={
            "azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
            "axis": [0, 1, 2],
        },
    )
def open_dc_estimate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read the Doppler-centroid polynomial estimates."""
    dc_estimates = esa_safe.parse_tag_as_list(annotation, ".//dcEstimate")

    azimuth_time = [estimate["azimuthTime"] for estimate in dc_estimates]
    t0 = [estimate["t0"] for estimate in dc_estimates]
    data_dc_poly = [
        [float(c) for c in estimate["dataDcPolynomial"]["$"].split()]
        for estimate in dc_estimates
    ]
    return xr.Dataset(
        data_vars={
            "t0": ("azimuth_time", t0),
            "data_dc_polynomial": (("azimuth_time", "degree"), data_dc_poly),
        },
        coords={
            "azimuth_time": [np.datetime64(at) for at in azimuth_time],
            "degree": list(range(len(data_dc_poly[0]))),
        },
    )
def open_azimuth_fm_rate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read the azimuth FM rate polynomials."""
    azimuth_fm_rates = esa_safe.parse_tag_as_list(annotation, ".//azimuthFmRate")

    azimuth_time = [record["azimuthTime"] for record in azimuth_fm_rates]
    t0 = [record["t0"] for record in azimuth_fm_rates]
    azimuth_fm_rate_poly = [
        [float(c) for c in record["azimuthFmRatePolynomial"]["$"].split()]
        for record in azimuth_fm_rates
    ]
    return xr.Dataset(
        data_vars={
            "t0": ("azimuth_time", t0),
            "azimuth_fm_rate_polynomial": (
                ("azimuth_time", "degree"),
                azimuth_fm_rate_poly,
            ),
        },
        coords={
            "azimuth_time": [np.datetime64(at) for at in azimuth_time],
            "degree": list(range(len(azimuth_fm_rate_poly[0]))),
        },
    )
def find_available_groups(
    product_files: T.Dict[str, T.Tuple[str, str, str, str, str]],
    product_path: str,
    check_files_exist: bool = False,
    fs: fsspec.AbstractFileSystem = fsspec.filesystem("file"),
) -> T.Dict[str, T.List[str]]:
    """Map group names (e.g. ``"IW1/VV/gcp"``) to the files that serve them.

    Annotation files contribute the measurement group plus one subgroup per
    metadata category; calibration and noise files contribute their own
    subgroups.  With ``check_files_exist`` missing files are skipped.
    """
    groups: T.Dict[str, T.List[str]] = {}
    for path, (schema, _, swath, polarization, _) in product_files.items():
        swath_pol_group = f"{swath}/{polarization}".upper()
        abspath = os.path.join(product_path, os.path.normpath(path))
        if check_files_exist and not fs.exists(abspath):
            continue
        if schema == "s1Level1ProductSchema":
            groups[swath.upper()] = [""]
            groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])
            for metadata_group in [
                "orbit",
                "attitude",
                "azimuth_fm_rate",
                "dc_estimate",
                "gcp",
                "coordinate_conversion",
            ]:
                groups[f"{swath_pol_group}/{metadata_group}"] = [abspath]
        elif schema == "s1Level1CalibrationSchema":
            groups[f"{swath_pol_group}/calibration"] = [abspath]
        elif schema == "s1Level1NoiseSchema":
            groups[f"{swath_pol_group}/noise_range"] = [abspath]
            groups[f"{swath_pol_group}/noise_azimuth"] = [abspath]
        elif schema == "s1Level1MeasurementSchema":
            groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])
    return groups
def open_pol_dataset(
    measurement: esa_safe.PathOrFileType,
    annotation: esa_safe.PathOrFileType,
    fs: T.Optional[fsspec.AbstractFileSystem] = None,
) -> xr.Dataset:
    """Open a single swath/polarization measurement as a Dataset.

    Coordinates are built from the annotation: ``azimuth_time`` per line
    (restarted per burst for SLC products) and either ``slant_range_time``
    or ``ground_range`` per pixel depending on the product projection.

    Raises ``ValueError`` for an unknown projection.
    """
    product_information = esa_safe.parse_tag(annotation, ".//productInformation")
    image_information = esa_safe.parse_tag(annotation, ".//imageInformation")
    swath_timing = esa_safe.parse_tag(annotation, ".//swathTiming")

    incidence_angle_mid_swath = image_information["incidenceAngleMidSwath"]
    number_of_samples = image_information["numberOfSamples"]
    first_slant_range_time = image_information["slantRangeTime"]
    slant_range_time_interval = 1 / product_information["rangeSamplingRate"]

    number_of_lines = image_information["numberOfLines"]
    first_azimuth_time = image_information["productFirstLineUtcTime"]
    azimuth_time_interval = image_information["azimuthTimeInterval"]
    number_of_bursts = swath_timing["burstList"]["@count"]
    range_pixel_spacing = image_information["rangePixelSpacing"]
    anx_datetime = image_information["ascendingNodeTime"]

    attrs = {
        "sar:center_frequency": product_information["radarFrequency"] / 10 ** 9,
        "sar:pixel_spacing_azimuth": image_information["azimuthPixelSpacing"],
        "sar:pixel_spacing_range": range_pixel_spacing,
        "azimuth_time_interval": azimuth_time_interval,
        "slant_range_time_interval": slant_range_time_interval,
        "incidence_angle_mid_swath": incidence_angle_mid_swath,
        "sat:anx_datetime": anx_datetime + "Z",
    }
    encoding = {}
    swap_dims = {}
    chunks: T.Union[None, T.Dict[str, int]] = None

    azimuth_time = pd.date_range(
        start=first_azimuth_time,
        periods=number_of_lines,
        freq=pd.Timedelta(azimuth_time_interval, "s"),
    ).values
    if number_of_bursts == 0:
        swap_dims = {"line": "azimuth_time", "pixel": "slant_range_time"}
    else:
        lines_per_burst = swath_timing["linesPerBurst"]
        attrs.update(
            {
                "azimuth_steering_rate": product_information["azimuthSteeringRate"],
                "number_of_bursts": number_of_bursts,
                "lines_per_burst": lines_per_burst,
            }
        )
        # each burst restarts the azimuth time axis from its own first line
        for burst_index, burst in enumerate(swath_timing["burstList"]["burst"]):
            first_azimuth_time_burst = burst["azimuthTime"]
            azimuth_time_burst = pd.date_range(
                start=first_azimuth_time_burst,
                periods=lines_per_burst,
                freq=pd.Timedelta(azimuth_time_interval, "s"),
            )
            azimuth_time[
                lines_per_burst * burst_index : lines_per_burst * (burst_index + 1)
            ] = azimuth_time_burst
        # chunk at burst boundaries if dask is present
        try:
            import dask  # noqa

            encoding["preferred_chunks"] = {"line": lines_per_burst}
            chunks = {}
        except ModuleNotFoundError:
            pass

    coords = {
        "pixel": np.arange(0, number_of_samples, dtype=int),
        "line": np.arange(0, number_of_lines, dtype=int),
        "azimuth_time": ("line", azimuth_time),
    }

    if product_information["projection"] == "Slant Range":
        slant_range_time = np.linspace(
            first_slant_range_time,
            first_slant_range_time
            + slant_range_time_interval * (number_of_samples - 1),
            number_of_samples,
        )
        coords["slant_range_time"] = ("pixel", slant_range_time)
    elif product_information["projection"] == "Ground Range":
        ground_range = np.linspace(
            0,
            range_pixel_spacing * (number_of_samples - 1),
            number_of_samples,
        )
        coords["ground_range"] = ("pixel", ground_range)
        swap_dims = {"line": "azimuth_time", "pixel": "ground_range"}
    else:
        raise ValueError(f"unknown projection {product_information['projection']}")

    # temporary ugly work-around to get fsspec support with rasterio >= 1.3a3
    # the try block uses fsspec if rasterio >= 1.3a3 is installed
    # the except block falls back to standard file based rasterio
    # the with is needed to avoid polluting stderr when the try block fails;
    # os.devnull (not a hard-coded "/dev/null") keeps this portable and the
    # context manager guarantees the handle is closed
    with open(os.devnull, "w") as devnull:
        with contextlib.redirect_stderr(devnull):
            try:
                arr = xr.open_dataarray(fs.open(measurement), engine="rasterio", chunks=chunks)  # type: ignore
            except AttributeError:
                arr = xr.open_dataarray(measurement, engine="rasterio")  # type: ignore

    arr = arr.squeeze("band").drop_vars(["band", "spatial_ref"])
    arr = arr.rename({"y": "line", "x": "pixel"})
    arr = arr.assign_coords(coords)
    arr = arr.swap_dims(swap_dims)

    arr.attrs.update(attrs)
    arr.encoding.update(encoding)
    return xr.Dataset(attrs=attrs, data_vars={"measurement": arr})
def find_bursts_index(
    pol_dataset: xr.Dataset,
    azimuth_anx_time: float,
    use_center: bool = False,
) -> int:
    """Return the index of the burst closest to *azimuth_anx_time* (seconds).

    The distance is measured from the first line of each burst or, with
    ``use_center``, from its central line.
    """
    lines_per_burst = pol_dataset.attrs["lines_per_burst"]
    anx_datetime = np.datetime64(pol_dataset.attrs["sat:anx_datetime"].replace("Z", ""))
    target = pd.Timedelta(azimuth_anx_time, unit="s")
    first_line = lines_per_burst // 2 if use_center else 0
    reference_times = (
        pol_dataset.azimuth_time[first_line::lines_per_burst] - anx_datetime
    )
    distance = abs(reference_times - target)
    return distance.argmin().item()  # type: ignore
def crop_burst_dataset(
    pol_dataset: xr.Dataset,
    burst_index: T.Optional[int] = None,
    azimuth_anx_time: T.Optional[float] = None,
    use_center: bool = False,
) -> xr.Dataset:
    """Return the dataset cropped to a single burst.

    The burst is selected either directly via ``burst_index`` or by its
    time since the ascending node (``azimuth_anx_time``); exactly one of
    the two must be given.
    """
    if burst_index is not None and azimuth_anx_time is not None:
        raise TypeError(
            "only one keyword between 'burst_index' and 'azimuth_anx_time' must be defined"
        )
    if burst_index is None:
        if azimuth_anx_time is None:
            raise TypeError(
                "one keyword between 'burst_index' and 'azimuth_anx_time' must be defined"
            )
        burst_index = find_bursts_index(
            pol_dataset, azimuth_anx_time, use_center=use_center
        )

    if not 0 <= burst_index < pol_dataset.attrs["number_of_bursts"]:
        raise IndexError(f"burst_index={burst_index} out of bounds")

    lines_per_burst = pol_dataset.attrs["lines_per_burst"]
    first_line = lines_per_burst * burst_index
    ds = pol_dataset.sel(line=slice(first_line, first_line + lines_per_burst - 1))

    anx_datetime = np.datetime64(pol_dataset.attrs["sat:anx_datetime"].replace("Z", ""))
    burst_azimuth_anx_times = ds.azimuth_time - anx_datetime
    ds.attrs["azimuth_anx_time"] = burst_azimuth_anx_times.values[0] / ONE_SECOND
    ds = ds.swap_dims({"line": "azimuth_time", "pixel": "slant_range_time"})
    ds.attrs["burst_index"] = burst_index

    return ds
def mosaic_slc_iw(slc_iw_image: xr.Dataset, crop: int = 90) -> xr.Dataset:
    """Concatenate all bursts along azimuth, trimming *crop* lines per edge."""
    trimmed_bursts = [
        crop_burst_dataset(slc_iw_image, burst_index=index).isel(
            azimuth_time=slice(crop, -crop)
        )
        for index in range(slc_iw_image.attrs["number_of_bursts"])
    ]
    return xr.concat(trimmed_bursts, dim="azimuth_time")
def calibrate_amplitude(
    digital_number: xr.DataArray, calibration_lut: xr.DataArray
) -> xr.DataArray:
    """Scale digital numbers to amplitude using the calibration LUT.

    The LUT is interpolated onto the measurement grid before dividing.
    """
    lut = calibration_lut.interp(
        line=digital_number.line,
        pixel=digital_number.pixel,
    ).astype(np.float32)
    amplitude = digital_number / lut
    amplitude.attrs.update(digital_number.attrs)
    try:
        lut_name = calibration_lut.attrs["long_name"].partition("calibration LUT")[0]
        amplitude.attrs["long_name"] = f"amplitude for {lut_name}"
        amplitude.attrs["units"] = lut.attrs["units"]
    except KeyError:
        # LUT carries no descriptive attributes: leave amplitude attrs as-is
        pass
    return amplitude
def calibrate_intensity(
    digital_number: xr.DataArray,
    calibration_lut: xr.DataArray,
    as_db: bool = False,
    min_db: T.Optional[float] = -40.0,
) -> xr.DataArray:
    """Scale digital numbers to intensity, optionally in dB clipped at *min_db*."""
    amplitude = calibrate_amplitude(digital_number, calibration_lut)
    intensity = abs(amplitude) ** 2
    if as_db:
        intensity = 10.0 * np.log10(intensity)
        if min_db is not None:
            intensity = np.maximum(intensity, min_db)
        units = "dB"
    else:
        units = "m2 m-2"
    intensity.attrs.update(amplitude.attrs)
    intensity.attrs["units"] = units
    try:
        intensity.attrs["long_name"] = amplitude.attrs["long_name"].partition(
            "amplitude for "
        )[2]
    except KeyError:
        pass
    return intensity
def slant_range_time_to_ground_range(
    azimuth_time: xr.DataArray,
    slant_range_time: xr.DataArray,
    coordinate_conversion: xr.DataArray,
) -> xr.DataArray:
    """Convert two-way slant-range time to ground range via the SRGR polynomial."""
    slant_range = SPEED_OF_LIGHT / 2.0 * slant_range_time
    conversion = coordinate_conversion.interp(azimuth_time=azimuth_time)
    offset = slant_range - conversion.sr0
    ground_range = (conversion.srgrCoefficients * offset ** conversion.degree).sum(
        "degree"
    )
    return ground_range  # type: ignore
def assign_slant_range_time_coord(
    measurement: xr.Dataset, coordinate_conversion: xr.Dataset
) -> xr.Dataset:
    """Attach a 2-D ``slant_range_time`` coordinate to a ground-range product."""
    offset = measurement.ground_range - coordinate_conversion.gr0
    slant_range = (
        coordinate_conversion.grsrCoefficients * offset ** coordinate_conversion.degree
    ).sum(dim="degree")
    interpolated = slant_range.interp(
        azimuth_time=measurement.azimuth_time, ground_range=measurement.ground_range
    ).data
    slant_range_time = 2 / SPEED_OF_LIGHT * interpolated
    measurement = measurement.assign_coords(
        slant_range_time=(("azimuth_time", "ground_range"), slant_range_time)
    )  # type: ignore
    return measurement
def build_burst_id(lat: float, lon: float, relative_orbit: int) -> str:
    """Build a burst identifier from the burst centre and the relative orbit.

    Latitude and longitude are encoded in tenths of a degree together with a
    hemisphere letter, e.g. ``build_burst_id(10.3, 2.0, 27) == "R027-N103-E0020"``.
    """
    lat_tenths = int(round(lat * 10))
    lon_tenths = int(round(lon * 10))
    hemisphere = "N" if lat_tenths >= 0 else "S"
    meridian = "E" if lon_tenths >= 0 else "W"
    return f"R{relative_orbit:03}-{hemisphere}{lat_tenths:03}-{meridian}{lon_tenths:04}"
def compute_burst_centres(
    gcp: xr.Dataset,
) -> T.Tuple[T.List[float], T.List[float]]:
    """Estimate per-burst centre latitudes/longitudes from the GCP grid.

    Averages consecutive pairs of GCP rows (and all range samples) so that
    each value lies mid-way between two grid lines, i.e. near a burst centre.
    """
    rolling_pairs = gcp.rolling(azimuth_time=2, min_periods=1)
    windowed = rolling_pairs.construct(azimuth_time="az_win")
    centre = windowed.mean(["az_win", "slant_range_time"]).isel(
        azimuth_time=slice(1, None)
    )
    return centre.latitude.values.tolist(), centre.longitude.values.tolist()
# Dispatch table mapping a metadata group name (the last component of a group
# path such as "IW1/VV/orbit") to the reader that builds its dataset from the
# corresponding annotation/calibration/noise file.
METADATA_OPENERS = {
    "orbit": open_orbit_dataset,
    "attitude": open_attitude_dataset,
    "azimuth_fm_rate": open_azimuth_fm_rate_dataset,
    "dc_estimate": open_dc_estimate_dataset,
    "gcp": open_gcp_dataset,
    "coordinate_conversion": open_coordinate_conversion_dataset,
    "calibration": open_calibration_dataset,
    "noise_range": open_noise_range_dataset,
    "noise_azimuth": open_noise_azimuth_dataset,
}
def do_override_product_files(
    template: str, product_files: T.Dict[str, T.Tuple[str, str, str, str, str]]
) -> T.Dict[str, T.Tuple[str, str, str, str, str]]:
    """Rewrite product-file paths according to *template*.

    The template is formatted with ``locals()``, so it may reference the
    local names ``path``, ``type``, ``prefix``, ``swath``, ``polarization``,
    ``date``, ``ext`` and ``dirname``.  Renaming any of these locals would
    silently break user-supplied templates.
    """
    overridden_product_files = {}
    for path, description in product_files.items():
        # NOTE(review): 'type' shadows the builtin, but it is part of the
        # template vocabulary via locals() and must keep this exact name.
        type, prefix, swath, polarization, date = description
        ext = os.path.splitext(path)[1]
        dirname = os.path.dirname(path)
        overridden_path = template.format(**locals())
        overridden_product_files[overridden_path] = description
    return overridden_product_files
def open_sentinel1_dataset(
    product_urlpath: esa_safe.PathType,
    *,
    drop_variables: T.Optional[T.Tuple[str]] = None,
    group: T.Optional[str] = None,
    fs: T.Optional[fsspec.AbstractFileSystem] = None,
    storage_options: T.Optional[T.Dict[str, T.Any]] = None,
    check_files_exist: bool = False,
    override_product_files: T.Optional[str] = None,
) -> xr.Dataset:
    """Open a Sentinel-1 SAFE product (or one of its groups) as a Dataset.

    Parameters
    ----------
    product_urlpath : path or URL of the SAFE directory or its manifest.safe.
    drop_variables : accepted for xarray backend compatibility; ignored.
    group : group to open, e.g. "IW1/VV", "IW1/VV/gcp" or "IW1/VV/3"
        (a trailing integer selects a single burst); ``None`` opens the
        root group listing the available subgroups.
    fs / storage_options : mutually exclusive fsspec filesystem options.
    check_files_exist : skip groups whose backing files are missing.
    override_product_files : optional template used to rewrite the file
        paths declared in the manifest (see ``do_override_product_files``).

    Raises ``ValueError`` when ``group`` is not one of the available groups.
    """
    if drop_variables is not None:
        warnings.warn("'drop_variables' is currently ignored")

    fs, manifest_path = get_fs_path(product_urlpath, fs, storage_options)
    if fs.isdir(manifest_path):
        manifest_path = os.path.join(manifest_path, "manifest.safe")
    product_path = os.path.dirname(manifest_path)

    with fs.open(manifest_path) as file:
        product_attrs, product_files = esa_safe.parse_manifest_sentinel1(file)

    if override_product_files:
        product_files = do_override_product_files(override_product_files, product_files)

    groups = find_available_groups(
        product_files, product_path, check_files_exist=check_files_exist, fs=fs
    )

    group, burst_index = normalise_group(group)
    absgroup = f"/{group}"
    if group != "" and group not in groups:
        raise ValueError(
            f"Invalid group {group!r}, please select one of the following groups:"
            f"\n{list(groups.keys())}"
        )

    metadata = ""

    ds = xr.Dataset()
    if group == "":
        subgroups = list(groups)
    else:
        subgroups = [
            g[len(group) + 1 :] for g in groups if g.startswith(group) and g != group
        ]

        # depth 1 ("IW1/VV") is a measurement group: groups[group] holds the
        # measurement file first and the annotation file second
        if group.count("/") == 1:
            with fs.open(groups[group][1]) as annotation:
                ds = open_pol_dataset(groups[group][0], annotation, fs=fs)
        # depth 2 ("IW1/VV/orbit") is a metadata group served by one file
        elif group.count("/") == 2:
            _, _, metadata = group.split("/", 2)
            with fs.open(groups[group][0]) as file:
                ds = METADATA_OPENERS[metadata](file)

    for data_var in ds.data_vars:
        ds.data_vars[data_var].attrs.update(product_attrs)

    product_attrs["group"] = absgroup
    if len(subgroups):
        product_attrs["subgroups"] = subgroups
    ds.attrs.update(product_attrs)  # type: ignore

    # a "IW1/VV/<n>" group crops the measurement to burst <n>
    if group.count("/") == 1 and burst_index is not None:
        ds = crop_burst_dataset(ds, burst_index=burst_index)

    conventions.update_attributes(ds, group=metadata)

    return ds
| 37.311518 | 121 | 0.646881 |
import contextlib
import os
import typing as T
import warnings
import fsspec
import numpy as np
import pandas as pd
import xarray as xr
from . import conventions, esa_safe
SPEED_OF_LIGHT = 299_792_458
ONE_SECOND = np.timedelta64(1, "s")
def get_fs_path(
urlpath_or_path: esa_safe.PathType,
fs: T.Optional[fsspec.AbstractFileSystem] = None,
storage_options: T.Optional[T.Dict[str, T.Any]] = None,
) -> T.Tuple[fsspec.AbstractFileSystem, str]:
if fs is not None and storage_options is not None:
raise TypeError("only one of 'fs' and 'storage_options' can be not None")
if fs is None:
fs, _, paths = fsspec.get_fs_token_paths(
urlpath_or_path, storage_options=storage_options
)
if len(paths) == 0:
raise ValueError(f"file or object not found {urlpath_or_path!r}")
elif len(paths) > 1:
raise ValueError(f"multiple files or objects found {urlpath_or_path!r}")
path = paths[0]
else:
path = str(urlpath_or_path)
return fs, path
def normalise_group(group: T.Optional[str]) -> T.Tuple[str, T.Optional[int]]:
if group is None:
group = ""
if group.startswith("/"):
group = group[1:]
burst_index = None
parent_group, _, last_name = group.rpartition("/")
if parent_group.count("/") == 1 and last_name.isnumeric():
burst_index = int(last_name)
group = parent_group
return group, burst_index
def open_calibration_dataset(calibration: esa_safe.PathType) -> xr.Dataset:
    """Read the radiometric calibration LUTs from a calibration annotation.

    Returns a dataset with the sigmaNought/betaNought/gamma/dn vectors on a
    regular (line, pixel) grid; raises ValueError when the vectors do not all
    share the same pixel axis.
    """
    calibration_vectors = esa_safe.parse_tag_as_list(
        calibration, ".//calibrationVector", "calibration"
    )
    azimuth_time_list = []
    pixel_list = []
    line_list = []
    sigmaNought_list = []
    betaNought_list = []
    gamma_list = []
    dn_list = []
    for vector in calibration_vectors:
        azimuth_time_list.append(vector["azimuthTime"])
        line_list.append(vector["line"])
        # numeric vectors are stored as space-separated text under the "$" key
        pixel = np.fromstring(vector["pixel"]["$"], dtype=int, sep=" ")
        pixel_list.append(pixel)
        sigmaNought = np.fromstring(vector["sigmaNought"]["$"], dtype=np.float32, sep=" ")
        sigmaNought_list.append(sigmaNought)
        betaNought = np.fromstring(vector["betaNought"]["$"], dtype=np.float32, sep=" ")
        betaNought_list.append(betaNought)
        gamma = np.fromstring(vector["gamma"]["$"], dtype=np.float32, sep=" ")
        gamma_list.append(gamma)
        dn = np.fromstring(vector["dn"]["$"], dtype=np.float32, sep=" ")
        dn_list.append(dn)
    pixel = np.array(pixel_list)
    # every row must share the same pixel coordinate for a rectangular grid
    if not np.allclose(pixel, pixel[0]):
        raise ValueError(
            "Unable to organise calibration vectors in a regular line-pixel grid"
        )
    data_vars = {
        "azimuth_time": ("line", [np.datetime64(dt) for dt in azimuth_time_list]),
        "sigmaNought": (("line", "pixel"), sigmaNought_list),
        "betaNought": (("line", "pixel"), betaNought_list),
        "gamma": (("line", "pixel"), gamma_list),
        "dn": (("line", "pixel"), dn_list),
    }
    coords = {"line": line_list, "pixel": pixel_list[0]}
    return xr.Dataset(data_vars=data_vars, coords=coords)
def open_noise_range_dataset(noise: esa_safe.PathType) -> xr.Dataset:
    """Read the range noise LUT vectors onto a regular (line, pixel) grid.

    Raises ValueError when the vectors do not all share the same pixel axis.
    """
    noise_vectors = esa_safe.parse_tag_as_list(noise, ".//noiseRangeVector", "noise")
    azimuth_time_list = []
    pixel_list = []
    line_list = []
    noiseRangeLut_list = []
    for vector in noise_vectors:
        azimuth_time_list.append(vector["azimuthTime"])
        line_list.append(vector["line"])
        # numeric vectors are stored as space-separated text under the "$" key
        pixel = np.fromstring(vector["pixel"]["$"], dtype=int, sep=" ")
        pixel_list.append(pixel)
        noiseRangeLut = np.fromstring(vector["noiseRangeLut"]["$"], dtype=np.float32, sep=" ")
        noiseRangeLut_list.append(noiseRangeLut)
    pixel = np.array(pixel_list)
    # every row must share the same pixel coordinate for a rectangular grid
    if not np.allclose(pixel, pixel[0]):
        raise ValueError(
            "Unable to organise noise vectors in a regular line-pixel grid"
        )
    data_vars = {
        "azimuth_time": ("line", [np.datetime64(dt) for dt in azimuth_time_list]),
        "noiseRangeLut": (("line", "pixel"), noiseRangeLut_list),
    }
    coords = {"line": line_list, "pixel": pixel_list[0]}
    return xr.Dataset(data_vars=data_vars, coords=coords)
def open_noise_azimuth_dataset(noise: esa_safe.PathType) -> xr.Dataset:
    """Read the azimuth noise LUT from a noise annotation.

    Only the first noise vector is used; an empty dataset is returned when
    the annotation contains no azimuth noise vectors.
    """
    noise_vectors = esa_safe.parse_tag_as_list(noise, ".//noiseAzimuthVector", "noise")
    first_range_sample = []
    line_list = []
    noiseAzimuthLut_list = []
    for vector in noise_vectors:
        first_range_sample.append(vector["firstRangeSample"])
        # numeric vectors are stored as space-separated text under the "$" key
        line = np.fromstring(vector["line"]["$"], dtype=int, sep=" ")
        line_list.append(line)
        noiseAzimuthLut = np.fromstring(vector["noiseAzimuthLut"]["$"], dtype=np.float32, sep=" ")
        noiseAzimuthLut_list.append(noiseAzimuthLut)
    data_vars = {}
    coords = {}
    # only the first vector is kept -- presumably one vector per sub-swath;
    # TODO(review): confirm additional vectors can really be ignored
    if first_range_sample:
        data_vars["noiseAzimuthLut"] = ("line", noiseAzimuthLut_list[0])
        coords["line"] = line_list[0]
    return xr.Dataset(data_vars=data_vars, coords=coords)
def open_coordinate_conversion_dataset(
    annotation_path: esa_safe.PathType,
) -> xr.Dataset:
    """Read the slant-range <-> ground-range conversion polynomials.

    Returns a dataset of srgr/grsr polynomial coefficients per azimuth time;
    the dataset is empty when the annotation has no conversion records.
    """
    coordinate_conversion = esa_safe.parse_tag_as_list(
        annotation_path, ".//coordinateConversionList/coordinateConversion"
    )
    gr0 = []
    sr0 = []
    azimuth_time = []
    slant_range_time = []
    srgrCoefficients: T.List[T.List[float]] = []
    grsrCoefficients: T.List[T.List[float]] = []
    for values in coordinate_conversion:
        sr0.append(values["sr0"])
        gr0.append(values["gr0"])
        azimuth_time.append(values["azimuthTime"])
        slant_range_time.append(values["slantRangeTime"])
        # coefficients are stored as space-separated text under the "$" key
        srgrCoefficients.append(
            [float(v) for v in values["srgrCoefficients"]["$"].split()]
        )
        grsrCoefficients.append(
            [float(v) for v in values["grsrCoefficients"]["$"].split()]
        )
    coords: T.Dict[str, T.Any] = {}
    data_vars: T.Dict[str, T.Any] = {}
    if srgrCoefficients:
        coords["azimuth_time"] = [np.datetime64(dt) for dt in azimuth_time]
        # polynomial degree axis, taken from the first record's length
        coords["degree"] = list(range(len(srgrCoefficients[0])))
        data_vars["gr0"] = ("azimuth_time", gr0)
        data_vars["sr0"] = ("azimuth_time", sr0)
        data_vars["slant_range_time"] = ("azimuth_time", slant_range_time)
        data_vars["srgrCoefficients"] = (("azimuth_time", "degree"), srgrCoefficients)
        data_vars["grsrCoefficients"] = (("azimuth_time", "degree"), grsrCoefficients)
    return xr.Dataset(data_vars=data_vars, coords=coords)
def open_gcp_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read the geolocation grid points (GCPs) from a product annotation.

    The points are arranged on an (azimuth_time, slant_range_time) grid with
    the corresponding raster (line, pixel) indices attached as coordinates.
    """
    geolocation_grid_points = esa_safe.parse_tag_as_list(
        annotation, ".//geolocationGridPoint"
    )
    azimuth_time = []
    slant_range_time = []
    line_set = set()
    pixel_set = set()
    # first pass: collect the distinct line/pixel coordinates of the grid
    for ggp in geolocation_grid_points:
        if ggp["line"] not in line_set:
            azimuth_time.append(np.datetime64(ggp["azimuthTime"]))
            line_set.add(ggp["line"])
        if ggp["pixel"] not in pixel_set:
            slant_range_time.append(ggp["slantRangeTime"])
            pixel_set.add(ggp["pixel"])
    shape = (len(azimuth_time), len(slant_range_time))
    dims = ("azimuth_time", "slant_range_time")
    # NaN-filled grids; cells not covered by a GCP stay NaN
    data_vars = {
        "latitude": (dims, np.full(shape, np.nan)),
        "longitude": (dims, np.full(shape, np.nan)),
        "height": (dims, np.full(shape, np.nan)),
        "incidenceAngle": (dims, np.full(shape, np.nan)),
        "elevationAngle": (dims, np.full(shape, np.nan)),
    }
    line = sorted(line_set)
    pixel = sorted(pixel_set)
    # second pass: scatter each point's values into its grid cell
    for ggp in geolocation_grid_points:
        for var in data_vars:
            j = line.index(ggp["line"])
            i = pixel.index(ggp["pixel"])
            data_vars[var][1][j, i] = ggp[var]
    ds = xr.Dataset(
        data_vars=data_vars,
        coords={
            "azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
            "slant_range_time": slant_range_time,
            "line": ("azimuth_time", line),
            "pixel": ("slant_range_time", pixel),
        },
    )
    return ds
def open_attitude_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read the platform attitude records (quaternions q0..q3, angular rates
    wx/wy/wz and Euler angles) indexed by azimuth time."""
    attitudes = esa_safe.parse_tag_as_list(annotation, ".//attitude")
    variables = ["q0", "q1", "q2", "q3", "wx", "wy", "wz", "pitch", "roll", "yaw"]
    times = [record["time"] for record in attitudes]
    data_vars: T.Dict[str, T.Any] = {
        var: ("azimuth_time", [record[var] for record in attitudes])
        for var in variables
    }
    return xr.Dataset(
        data_vars=data_vars,
        coords={"azimuth_time": [np.datetime64(dt) for dt in times]},
    )
def open_orbit_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read the orbit state vectors (position/velocity per axis) from a
    product annotation.

    The ``reference_system`` attribute is set only when all state vectors
    share the same frame; otherwise a warning is emitted and it is omitted.
    """
    orbits = esa_safe.parse_tag_as_list(annotation, ".//orbit")
    reference_system = orbits[0]["frame"]
    variables = ["position", "velocity"]
    # one list per axis (x, y, z) for each variable
    data: T.Dict[str, T.List[T.Any]] = {var: [[], [], []] for var in variables}
    azimuth_time: T.List[T.Any] = []
    for orbit in orbits:
        azimuth_time.append(orbit["time"])
        data["position"][0].append(orbit["position"]["x"])
        data["position"][1].append(orbit["position"]["y"])
        data["position"][2].append(orbit["position"]["z"])
        data["velocity"][0].append(orbit["velocity"]["x"])
        data["velocity"][1].append(orbit["velocity"]["y"])
        data["velocity"][2].append(orbit["velocity"]["z"])
        if orbit["frame"] != reference_system:
            warnings.warn(
                "reference_system is not consistent in all the state vectors. "
            )
            # mark the frame as unknown so the attribute is not written
            reference_system = None
    position = xr.Variable(data=data["position"], dims=("axis", "azimuth_time"))
    velocity = xr.Variable(data=data["velocity"], dims=("axis", "azimuth_time"))
    attrs = {}
    if reference_system is not None:
        attrs.update({"reference_system": reference_system})
    ds = xr.Dataset(
        data_vars={"position": position, "velocity": velocity},
        attrs=attrs,
        coords={
            "azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
            "axis": [0, 1, 2],
        },
    )
    return ds
def open_dc_estimate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read the Doppler centroid polynomial estimates from an annotation.

    Returns ``t0`` and the ``data_dc_polynomial`` coefficients per azimuth
    time; the polynomial degree axis is sized from the first record.
    """
    dc_estimates = esa_safe.parse_tag_as_list(annotation, ".//dcEstimate")
    azimuth_time = []
    t0 = []
    data_dc_poly = []
    for dc_estimate in dc_estimates:
        azimuth_time.append(dc_estimate["azimuthTime"])
        t0.append(dc_estimate["t0"])
        # coefficients are stored as space-separated text under the "$" key
        data_dc_poly.append(
            [float(c) for c in dc_estimate["dataDcPolynomial"]["$"].split()]
        )
    ds = xr.Dataset(
        data_vars={
            "t0": ("azimuth_time", t0),
            "data_dc_polynomial": (("azimuth_time", "degree"), data_dc_poly),
        },
        coords={
            "azimuth_time": [np.datetime64(at) for at in azimuth_time],
            "degree": list(range(len(data_dc_poly[0]))),
        },
    )
    return ds
def open_azimuth_fm_rate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read the azimuth FM rate polynomials from a product annotation.

    Returns ``t0`` and the ``azimuth_fm_rate_polynomial`` coefficients per
    azimuth time; the degree axis is sized from the first record.
    """
    azimuth_fm_rates = esa_safe.parse_tag_as_list(annotation, ".//azimuthFmRate")
    azimuth_time = []
    t0 = []
    azimuth_fm_rate_poly = []
    for azimuth_fm_rate in azimuth_fm_rates:
        azimuth_time.append(azimuth_fm_rate["azimuthTime"])
        t0.append(azimuth_fm_rate["t0"])
        # coefficients are stored as space-separated text under the "$" key
        azimuth_fm_rate_poly.append(
            [float(c) for c in azimuth_fm_rate["azimuthFmRatePolynomial"]["$"].split()]
        )
    ds = xr.Dataset(
        data_vars={
            "t0": ("azimuth_time", t0),
            "azimuth_fm_rate_polynomial": (
                ("azimuth_time", "degree"),
                azimuth_fm_rate_poly,
            ),
        },
        coords={
            "azimuth_time": [np.datetime64(at) for at in azimuth_time],
            "degree": list(range(len(azimuth_fm_rate_poly[0]))),
        },
    )
    return ds
def find_available_groups(
    product_files: T.Dict[str, T.Tuple[str, str, str, str, str]],
    product_path: str,
    check_files_exist: bool = False,
    fs: fsspec.AbstractFileSystem = fsspec.filesystem("file"),
) -> T.Dict[str, T.List[str]]:
    """Map group names ("IW1", "IW1/VV", "IW1/VV/orbit", ...) to the list of
    files that back them, based on the manifest's file descriptions.

    With ``check_files_exist`` files missing on ``fs`` are skipped.
    NOTE(review): the ``fs`` default is created once at import time, and the
    loop variable ``type`` shadows the builtin of the same name.
    """
    groups: T.Dict[str, T.List[str]] = {}
    for path, (type, _, swath, polarization, _) in product_files.items():
        swath_pol_group = f"{swath}/{polarization}".upper()
        abspath = os.path.join(product_path, os.path.normpath(path))
        if check_files_exist:
            if not fs.exists(abspath):
                continue
        if type == "s1Level1ProductSchema":
            groups[swath.upper()] = [""]
            # prepend so this file precedes any previously recorded one
            groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])
            for metadata_group in [
                "orbit",
                "attitude",
                "azimuth_fm_rate",
                "dc_estimate",
                "gcp",
                "coordinate_conversion",
            ]:
                groups[f"{swath_pol_group}/{metadata_group}"] = [abspath]
        elif type == "s1Level1CalibrationSchema":
            groups[f"{swath_pol_group}/calibration"] = [abspath]
        elif type == "s1Level1NoiseSchema":
            groups[f"{swath_pol_group}/noise_range"] = [abspath]
            groups[f"{swath_pol_group}/noise_azimuth"] = [abspath]
        elif type == "s1Level1MeasurementSchema":
            groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])
    return groups
def open_pol_dataset(
    measurement: esa_safe.PathOrFileType,
    annotation: esa_safe.PathOrFileType,
    fs: T.Optional[fsspec.AbstractFileSystem] = None,
) -> xr.Dataset:
    """Open a single-polarisation measurement as a dataset with SAR coords.

    The measurement raster is opened via the rasterio engine and annotated
    with azimuth_time / slant_range_time (or ground_range) coordinates and
    SAR attributes derived from the product annotation.  For burst products
    the per-burst azimuth times from the annotation overwrite the regular
    time axis, and dask chunking per burst is enabled when dask is available.
    """
    product_information = esa_safe.parse_tag(annotation, ".//productInformation")
    image_information = esa_safe.parse_tag(annotation, ".//imageInformation")
    swath_timing = esa_safe.parse_tag(annotation, ".//swathTiming")
    incidence_angle_mid_swath = image_information["incidenceAngleMidSwath"]
    number_of_samples = image_information["numberOfSamples"]
    first_slant_range_time = image_information["slantRangeTime"]
    slant_range_time_interval = 1 / product_information["rangeSamplingRate"]
    number_of_lines = image_information["numberOfLines"]
    first_azimuth_time = image_information["productFirstLineUtcTime"]
    azimuth_time_interval = image_information["azimuthTimeInterval"]
    number_of_bursts = swath_timing["burstList"]["@count"]
    # NOTE(review): "spaxing" is a typo for "spacing", kept for consistency
    range_pixel_spaxing = image_information["rangePixelSpacing"]
    anx_datetime = image_information["ascendingNodeTime"]
    attrs = {
        "sar:center_frequency": product_information["radarFrequency"] / 10 ** 9,
        "sar:pixel_spacing_azimuth": image_information["azimuthPixelSpacing"],
        "sar:pixel_spacing_range": range_pixel_spaxing,
        "azimuth_time_interval": azimuth_time_interval,
        "slant_range_time_interval": slant_range_time_interval,
        "incidence_angle_mid_swath": incidence_angle_mid_swath,
        "sat:anx_datetime": anx_datetime + "Z",
    }
    encoding = {}
    swap_dims = {}
    chunks: T.Union[None, T.Dict[str, int]] = None
    # regular azimuth time axis; for burst products it is patched below
    azimuth_time = pd.date_range(
        start=first_azimuth_time,
        periods=number_of_lines,
        freq=pd.Timedelta(azimuth_time_interval, "s"),
    ).values
    if number_of_bursts == 0:
        swap_dims = {"line": "azimuth_time", "pixel": "slant_range_time"}
    else:
        lines_per_burst = swath_timing["linesPerBurst"]
        attrs.update(
            {
                "azimuth_steering_rate": product_information["azimuthSteeringRate"],
                "number_of_bursts": number_of_bursts,
                "lines_per_burst": lines_per_burst,
            }
        )
        # overwrite each burst's slice of the time axis with the burst's
        # own start time from the annotation
        for burst_index, burst in enumerate(swath_timing["burstList"]["burst"]):
            first_azimuth_time_burst = burst["azimuthTime"]
            azimuth_time_burst = pd.date_range(
                start=first_azimuth_time_burst,
                periods=lines_per_burst,
                freq=pd.Timedelta(azimuth_time_interval, "s"),
            )
            azimuth_time[
                lines_per_burst * burst_index : lines_per_burst * (burst_index + 1)
            ] = azimuth_time_burst
        try:
            # enable one-chunk-per-burst lazy loading only if dask is present
            import dask
            encoding["preferred_chunks"] = {"line": lines_per_burst}
            chunks = {}
        except ModuleNotFoundError:
            pass
    coords = {
        "pixel": np.arange(0, number_of_samples, dtype=int),
        "line": np.arange(0, number_of_lines, dtype=int),
        "azimuth_time": ("line", azimuth_time),
    }
    if product_information["projection"] == "Slant Range":
        slant_range_time = np.linspace(
            first_slant_range_time,
            first_slant_range_time
            + slant_range_time_interval * (number_of_samples - 1),
            number_of_samples,
        )
        coords["slant_range_time"] = ("pixel", slant_range_time)
    elif product_information["projection"] == "Ground Range":
        ground_range = np.linspace(
            0,
            range_pixel_spaxing * (number_of_samples - 1),
            number_of_samples,
        )
        coords["ground_range"] = ("pixel", ground_range)
        swap_dims = {"line": "azimuth_time", "pixel": "ground_range"}
    else:
        raise ValueError(f"unknown projection {product_information['projection']}")
    # NOTE(review): silences rasterio warnings; "/dev/null" is POSIX-only
    with contextlib.redirect_stderr(open("/dev/null", "w")):
        try:
            # fs is None -> AttributeError on fs.open -> open the path directly
            arr = xr.open_dataarray(fs.open(measurement), engine="rasterio", chunks=chunks)
        except AttributeError:
            arr = xr.open_dataarray(measurement, engine="rasterio")
    arr = arr.squeeze("band").drop_vars(["band", "spatial_ref"])
    arr = arr.rename({"y": "line", "x": "pixel"})
    arr = arr.assign_coords(coords)
    arr = arr.swap_dims(swap_dims)
    arr.attrs.update(attrs)
    arr.encoding.update(encoding)
    return xr.Dataset(attrs=attrs, data_vars={"measurement": arr})
def find_bursts_index(
    pol_dataset: xr.Dataset,
    azimuth_anx_time: float,
    use_center: bool = False,
) -> int:
    """Return the index of the burst closest to *azimuth_anx_time*.

    *azimuth_anx_time* is expressed in seconds after the ascending node
    crossing; the comparison uses the first line of each burst, or the
    centre line when ``use_center`` is set.
    """
    lines_per_burst = pol_dataset.attrs["lines_per_burst"]
    anx_datetime = np.datetime64(pol_dataset.attrs["sat:anx_datetime"].replace("Z", ""))
    # re-bind the float seconds as a Timedelta for datetime arithmetic
    azimuth_anx_time = pd.Timedelta(azimuth_anx_time, unit="s")
    if use_center:
        # one sample per burst, taken at the burst centre line
        azimuth_anx_time_center = (
            pol_dataset.azimuth_time[lines_per_burst // 2 :: lines_per_burst]
            - anx_datetime
        )
        distance = abs(azimuth_anx_time_center - azimuth_anx_time)
    else:
        # one sample per burst, taken at the burst first line
        azimuth_anx_time_first_line = (
            pol_dataset.azimuth_time[::lines_per_burst] - anx_datetime
        )
        distance = abs(azimuth_anx_time_first_line - azimuth_anx_time)
    return distance.argmin().item()
def crop_burst_dataset(
    pol_dataset: xr.Dataset,
    burst_index: T.Optional[int] = None,
    azimuth_anx_time: T.Optional[float] = None,
    use_center: bool = False,
) -> xr.Dataset:
    """Crop a burst-mode dataset down to a single burst.

    Exactly one of ``burst_index`` and ``azimuth_anx_time`` must be given;
    the returned dataset is re-dimensioned on (azimuth_time,
    slant_range_time) and carries ``azimuth_anx_time`` / ``burst_index``
    attributes for the selected burst.
    """
    if (burst_index is not None) and (azimuth_anx_time is not None):
        raise TypeError(
            "only one keyword between 'burst_index' and 'azimuth_anx_time' must be defined"
        )
    if burst_index is None:
        if azimuth_anx_time is not None:
            burst_index = find_bursts_index(
                pol_dataset, azimuth_anx_time, use_center=use_center
            )
        else:
            raise TypeError(
                "one keyword between 'burst_index' and 'azimuth_anx_time' must be defined"
            )
    if burst_index < 0 or burst_index >= pol_dataset.attrs["number_of_bursts"]:
        raise IndexError(f"burst_index={burst_index} out of bounds")
    lines_per_burst = pol_dataset.attrs["lines_per_burst"]
    # line is a coordinate here, so .sel slices are label-based and inclusive
    ds = pol_dataset.sel(
        line=slice(
            lines_per_burst * burst_index, lines_per_burst * (burst_index + 1) - 1
        )
    )
    anx_datetime = np.datetime64(pol_dataset.attrs["sat:anx_datetime"].replace("Z", ""))
    burst_azimuth_anx_times = ds.azimuth_time - anx_datetime
    # store the burst start as float seconds after the ascending node
    ds.attrs["azimuth_anx_time"] = burst_azimuth_anx_times.values[0] / ONE_SECOND
    ds = ds.swap_dims({"line": "azimuth_time", "pixel": "slant_range_time"})
    ds.attrs["burst_index"] = burst_index
    return ds
def mosaic_slc_iw(slc_iw_image: xr.Dataset, crop: int = 90) -> xr.Dataset:
    """Stitch all bursts of an IW SLC product into one continuous image,
    trimming *crop* lines from both edges of every burst."""
    trimmed_bursts = [
        crop_burst_dataset(slc_iw_image, burst_index=index).isel(
            azimuth_time=slice(crop, -crop)
        )
        for index in range(slc_iw_image.attrs["number_of_bursts"])
    ]
    return xr.concat(trimmed_bursts, dim="azimuth_time")
def calibrate_amplitude(
    digital_number: xr.DataArray, calibration_lut: xr.DataArray
) -> xr.DataArray:
    """Convert digital numbers to calibrated amplitude using a LUT.

    The LUT is interpolated onto the measurement grid; ``long_name`` and
    ``units`` attributes are propagated when present on the LUT.
    """
    # interpolate the sparse LUT onto the full (line, pixel) grid
    calibration = calibration_lut.interp(
        line=digital_number.line,
        pixel=digital_number.pixel,
    ).astype(np.float32)
    amplitude = digital_number / calibration
    amplitude.attrs.update(digital_number.attrs)
    try:
        lut_name = calibration_lut.attrs["long_name"].partition("calibration LUT")[0]
        amplitude.attrs["long_name"] = f"amplitude for {lut_name}"
        amplitude.attrs["units"] = calibration.attrs["units"]
    except KeyError:
        # LUT without descriptive attrs: leave amplitude attrs as-is
        pass
    return amplitude
def calibrate_intensity(
    digital_number: xr.DataArray,
    calibration_lut: xr.DataArray,
    as_db: bool = False,
    min_db: T.Optional[float] = -40.0,
) -> xr.DataArray:
    """Convert digital numbers to calibrated intensity (backscatter).

    With ``as_db`` the intensity is returned in decibels, clipped from below
    at ``min_db`` unless ``min_db`` is None.  ``long_name``/``units``
    attributes are carried over from the amplitude calibration.
    """
    amplitude = calibrate_amplitude(digital_number, calibration_lut)
    intensity = abs(amplitude) ** 2
    if as_db:
        intensity = 10.0 * np.log10(intensity)
        if min_db is not None:
            intensity = np.maximum(intensity, min_db)
    # attrs are (re)attached after the arithmetic above because xarray
    # operations drop them; previously this update was duplicated per branch
    intensity.attrs.update(amplitude.attrs)
    intensity.attrs["units"] = "dB" if as_db else "m2 m-2"
    try:
        lut_name = amplitude.attrs["long_name"].partition("amplitude for ")[2]
        intensity.attrs["long_name"] = lut_name
    except KeyError:
        # amplitude without a long_name: nothing to propagate
        pass
    return intensity
def slant_range_time_to_ground_range(
    azimuth_time: xr.DataArray,
    slant_range_time: xr.DataArray,
    coordinate_conversion: xr.DataArray,
) -> xr.DataArray:
    """Convert slant range time to ground range via the srgr polynomials.

    The conversion coefficients are interpolated in azimuth time, then the
    polynomial is evaluated on the slant-range offset from ``sr0``.
    """
    # two-way time -> one-way distance
    slant_range = SPEED_OF_LIGHT / 2.0 * slant_range_time
    cc = coordinate_conversion.interp(azimuth_time=azimuth_time)
    x = slant_range - cc.sr0
    # polynomial evaluation: sum_d c_d * x**d over the "degree" dimension
    ground_range = (cc.srgrCoefficients * x ** cc.degree).sum("degree")
    return ground_range
def assign_slant_range_time_coord(
    measurement: xr.Dataset, coordinate_conversion: xr.Dataset
) -> xr.Dataset:
    """Attach a 2-D ``slant_range_time`` coordinate to a ground-range product.

    The grsr polynomials are evaluated on the ground-range offset from
    ``gr0``, interpolated onto the measurement grid and converted back to
    two-way time.
    """
    x = measurement.ground_range - coordinate_conversion.gr0
    # polynomial evaluation: sum_d c_d * x**d over the "degree" dimension
    slant_range = (
        coordinate_conversion.grsrCoefficients * x ** coordinate_conversion.degree
    ).sum(dim="degree")
    slant_range_coord = slant_range.interp(
        azimuth_time=measurement.azimuth_time, ground_range=measurement.ground_range
    ).data
    # one-way distance -> two-way time
    slant_range_time = 2 / SPEED_OF_LIGHT * slant_range_coord
    measurement = measurement.assign_coords(
        slant_range_time=(("azimuth_time", "ground_range"), slant_range_time)
    )
    return measurement
def build_burst_id(lat: float, lon: float, relative_orbit: int) -> str:
    """Build a burst ID like ``R044-N123-E0456`` from the burst centre
    latitude/longitude (in degrees) and the relative orbit number.

    Coordinates are encoded in tenths of a degree.  The hemisphere is
    carried by the N/S and E/W letters, so only the magnitude is written:
    previously negative coordinates leaked a '-' into the fixed-width
    fields (e.g. ``S-045`` instead of ``S045``).
    """
    lat = int(round(lat * 10))
    lon = int(round(lon * 10))
    n_or_s = "N" if lat >= 0 else "S"
    e_or_w = "E" if lon >= 0 else "W"
    # abs() keeps the zero-padded widths fixed at 3/4 digits
    burst_id = f"R{relative_orbit:03}" f"-{n_or_s}{abs(lat):03}" f"-{e_or_w}{abs(lon):04}"
    return burst_id
def compute_burst_centres(
    gcp: xr.Dataset,
) -> T.Tuple[T.List[float], T.List[float]]:
    """Estimate per-burst centre latitude/longitude from the GCP dataset.

    Consecutive pairs of GCP rows are averaged (rolling window of 2 along
    azimuth_time, then mean over the window and slant range); the first
    window is dropped because it covers a single row.
    """
    gcp_rolling = gcp.rolling(azimuth_time=2, min_periods=1)
    gc_az_win = gcp_rolling.construct(azimuth_time="az_win")
    centre = gc_az_win.mean(["az_win", "slant_range_time"])
    centre = centre.isel(azimuth_time=slice(1, None))
    return centre.latitude.values.tolist(), centre.longitude.values.tolist()
# maps a metadata group name (last component of e.g. "IW1/VV/orbit") to the
# opener that reads it from the corresponding annotation file
METADATA_OPENERS = {
    "orbit": open_orbit_dataset,
    "attitude": open_attitude_dataset,
    "azimuth_fm_rate": open_azimuth_fm_rate_dataset,
    "dc_estimate": open_dc_estimate_dataset,
    "gcp": open_gcp_dataset,
    "coordinate_conversion": open_coordinate_conversion_dataset,
    "calibration": open_calibration_dataset,
    "noise_range": open_noise_range_dataset,
    "noise_azimuth": open_noise_azimuth_dataset,
}
def do_override_product_files(
    template: str, product_files: T.Dict[str, T.Tuple[str, str, str, str, str]]
) -> T.Dict[str, T.Tuple[str, str, str, str, str]]:
    """Rewrite the product file paths according to a format *template*.

    The template is expanded with ``str.format(**locals())``, so it may use
    any of the local names below as placeholders: ``{path}``, ``{type}``,
    ``{prefix}``, ``{swath}``, ``{polarization}``, ``{date}``, ``{ext}``,
    ``{dirname}``.  Do not rename these locals -- the template contract
    depends on them (``type`` shadows the builtin on purpose).
    """
    overridden_product_files = {}
    for path, description in product_files.items():
        type, prefix, swath, polarization, date = description
        ext = os.path.splitext(path)[1]
        dirname = os.path.dirname(path)
        overridden_path = template.format(**locals())
        overridden_product_files[overridden_path] = description
    return overridden_product_files
def open_sentinel1_dataset(
    product_urlpath: esa_safe.PathType,
    *,
    drop_variables: T.Optional[T.Tuple[str]] = None,
    group: T.Optional[str] = None,
    fs: T.Optional[fsspec.AbstractFileSystem] = None,
    storage_options: T.Optional[T.Dict[str, T.Any]] = None,
    check_files_exist: bool = False,
    override_product_files: T.Optional[str] = None,
) -> xr.Dataset:
    """Open a group of a Sentinel-1 SAFE product as an xarray dataset.

    ``group`` selects what to open: ``""``/None lists subgroups only,
    ``"SWATH/POL"`` opens the measurement, ``"SWATH/POL/<metadata>"`` opens
    a metadata dataset, and ``"SWATH/POL/<burst>"`` crops a single burst.
    ``drop_variables`` is accepted for the xarray backend API but ignored.
    """
    if drop_variables is not None:
        warnings.warn("'drop_variables' is currently ignored")
    fs, manifest_path = get_fs_path(product_urlpath, fs, storage_options)
    # accept either the SAFE directory or the manifest file itself
    if fs.isdir(manifest_path):
        manifest_path = os.path.join(manifest_path, "manifest.safe")
    product_path = os.path.dirname(manifest_path)
    with fs.open(manifest_path) as file:
        product_attrs, product_files = esa_safe.parse_manifest_sentinel1(file)
    if override_product_files:
        product_files = do_override_product_files(override_product_files, product_files)
    groups = find_available_groups(
        product_files, product_path, check_files_exist=check_files_exist, fs=fs
    )
    # split a possible trailing burst number off the group name
    group, burst_index = normalise_group(group)
    absgroup = f"/{group}"
    if group != "" and group not in groups:
        raise ValueError(
            f"Invalid group {group!r}, please select one of the following groups:"
            f"\n{list(groups.keys())}"
        )
    metadata = ""
    ds = xr.Dataset()
    if group == "":
        subgroups = list(groups)
    else:
        subgroups = [
            g[len(group) + 1 :] for g in groups if g.startswith(group) and g != group
        ]
        if group.count("/") == 1:
            # SWATH/POL: groups[group] holds [measurement, annotation]
            with fs.open(groups[group][1]) as annotation:
                ds = open_pol_dataset(groups[group][0], annotation, fs=fs)
        elif group.count("/") == 2:
            # SWATH/POL/<metadata>: dispatch on the metadata name
            _, _, metadata = group.split("/", 2)
            with fs.open(groups[group][0]) as file:
                ds = METADATA_OPENERS[metadata](file)
        for data_var in ds.data_vars:
            ds.data_vars[data_var].attrs.update(product_attrs)
    product_attrs["group"] = absgroup
    if len(subgroups):
        product_attrs["subgroups"] = subgroups
    ds.attrs.update(product_attrs)
    if group.count("/") == 1 and burst_index is not None:
        ds = crop_burst_dataset(ds, burst_index=burst_index)
    conventions.update_attributes(ds, group=metadata)
    return ds
| true | true |
f7283fda3f312bf5acbd51f353aeab26d065e142 | 640 | py | Python | utils.py | philshem/py-youtube-dl-email | cc358e649e86ecdd1b1b5c36fd55f8709a14a4c2 | [
"MIT"
] | null | null | null | utils.py | philshem/py-youtube-dl-email | cc358e649e86ecdd1b1b5c36fd55f8709a14a4c2 | [
"MIT"
] | null | null | null | utils.py | philshem/py-youtube-dl-email | cc358e649e86ecdd1b1b5c36fd55f8709a14a4c2 | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
# https://murhabazi.com/read-emails-python/
def read_credentails():
    """
    Return the user's email credentials from the environment (loaded from a
    .env file via python-dotenv).

    Returns:
        tuple: (USER_IMAP, USER_EMAIL, USER_PASSWORD)

    Raises:
        ValueError: if any of the three variables is missing or empty.
    """
    load_dotenv()
    USER_EMAIL = os.getenv("USER_EMAIL")
    USER_PASSWORD = os.getenv("USER_PASSWORD")
    USER_IMAP = os.getenv("USER_IMAP")
    if USER_IMAP and USER_EMAIL and USER_PASSWORD:
        return USER_IMAP, USER_EMAIL, USER_PASSWORD
    else:
        raise ValueError('Please add a .env file and write the credentials it it. See .env_sample for an example.')
from dotenv import load_dotenv
def read_credentails():
load_dotenv()
USER_EMAIL = os.getenv("USER_EMAIL")
USER_PASSWORD = os.getenv("USER_PASSWORD")
USER_IMAP = os.getenv("USER_IMAP")
if USER_IMAP and USER_EMAIL and USER_PASSWORD:
return USER_IMAP, USER_EMAIL, USER_PASSWORD
else:
raise ValueError('Please add a .env file and write the credentials it it. See .env_sample for an example.') | true | true |
f7284242f4393ec12e6b2c8a2971cf419d7bdb4c | 479 | py | Python | jp.atcoder/abc201/abc201_c/25914704.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc201/abc201_c/25914704.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc201/abc201_c/25914704.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import itertools
import typing
def solve(s: str) -> typing.NoReturn:
    """Count 4-press PIN codes consistent with the hint string *s* and print
    the result.

    ``s[d]`` is ``'o'`` (digit d is surely used), ``'?'`` (digit d may be
    used) or ``'x'`` (digit d is never used).
    """
    digits = 10
    candidates = [d for d in range(digits) if s[d] in 'o?']
    # bitmask of digits that must appear at least once
    required = sum(1 << d for d in range(digits) if s[d] == 'o')
    count = 0
    for pin in itertools.product(candidates, repeat=4):
        used = 0
        for d in pin:
            used |= 1 << d
        # valid when every required digit is covered by the pressed digits
        count += required & ~used == 0
    print(count)
def main() -> typing.NoReturn:
    # read the 10-character hint string from stdin and solve
    # NOTE(review): annotated NoReturn but actually returns None normally
    s = input()
    solve(s)
main()
| 14.515152 | 49 | 0.478079 | import itertools
import typing
def solve(s: str) -> typing.NoReturn:
n = 10
cand = []
must = 0
for i in range(n):
if s[i] == 'o':
cand.append(i)
must |= 1 << i
if s[i] == '?':
cand.append(i)
cnt = 0
for prod in itertools.product(cand, repeat=4):
res = 0
for i in prod:
res |= 1 << i
cnt += must & ~res == 0
print(cnt)
def main() -> typing.NoReturn:
s = input()
solve(s)
main()
| true | true |
f72843936c76a8e2157fea40a8e78716b137635c | 290 | py | Python | examples/example.py | gitcheol/selective_search | 896d849320a8b197210a2e15c56ed4c93a3e1fd8 | [
"MIT"
] | 34 | 2019-10-06T18:47:22.000Z | 2022-03-24T19:22:53.000Z | examples/example.py | gitcheol/selective_search | 896d849320a8b197210a2e15c56ed4c93a3e1fd8 | [
"MIT"
] | 5 | 2020-05-10T06:55:49.000Z | 2022-02-09T02:15:50.000Z | examples/example.py | gitcheol/selective_search | 896d849320a8b197210a2e15c56ed4c93a3e1fd8 | [
"MIT"
] | 15 | 2020-02-03T06:05:15.000Z | 2022-02-08T11:14:07.000Z | import skimage
import selective_search
image = skimage.data.astronaut()
# Propose boxes
boxes = selective_search.selective_search(image, mode='single', random_sort=True)
# Filter box proposals
boxes_filter = selective_search.box_filter(boxes, min_size=20, topN=80)
print(boxes_filter)
| 20.714286 | 81 | 0.803448 | import skimage
import selective_search
image = skimage.data.astronaut()
boxes = selective_search.selective_search(image, mode='single', random_sort=True)
boxes_filter = selective_search.box_filter(boxes, min_size=20, topN=80)
print(boxes_filter)
| true | true |
f728440ef281188f102539237d437dea70ef31b8 | 2,515 | py | Python | framework/Agent.py | citang/Python-Web-Framework | 6c27a1ad656ac59ba25467fbe4800fb3745bcfa6 | [
"Apache-2.0"
] | null | null | null | framework/Agent.py | citang/Python-Web-Framework | 6c27a1ad656ac59ba25467fbe4800fb3745bcfa6 | [
"Apache-2.0"
] | null | null | null | framework/Agent.py | citang/Python-Web-Framework | 6c27a1ad656ac59ba25467fbe4800fb3745bcfa6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@FileName : Agent.py
@Author : citang
@Date : 2021/7/27 5:46 下午
@Description : description the function of the file
"""
import sys
from framework import Model, Db, Log, Config, Common
class __Agent__:
    """Per-request facade exposing framework services to a handler."""
    def __init__(self, resultype, mod, handler, ip):
        # result payload, expected result type name, API status code,
        # handler identity and the caller's IP
        self.__DATA = None
        self.__RESULTYPE = resultype
        self.__APICODE = 200
        self.__MODULENAME = mod
        self.__HANDLERNAME = handler
        self.__REMOTE_IP = ip
    def Data(self, name):
        """Create a data model object for the model named *name*."""
        return Model.__ModelData__(name)
    def Db(self):
        """Create a MySQL database access object."""
        return Db.__MysqlDb__()
    def Log(self):
        """Create a logger bound to the current module and handler."""
        return Log.__Module__(self.__MODULENAME, self.__HANDLERNAME)
    def __Cache(self):
        """Create a cache object (not implemented yet)."""
        pass
    def GetAppConfig(self, group, name):
        """Get an application configuration value."""
        return Config.GetAppConfig(group, name)
    def GetSysConfig(self, group, name):
        """Get a system configuration value."""
        return Config.GetSysConfig(group, name)
    def SetApiCode(self, code):
        """Set the API status code (stored as a string)."""
        self.__APICODE = str(code)
    def GetApiCode(self):
        """Get the API status code."""
        return self.__APICODE
    def GetRemoteIp(self):
        """Get the request's remote IP address."""
        return self.__REMOTE_IP
    def SetResult(self, data):
        """Set the response payload from a typed data model object."""
        # setting a result is not allowed when no result type is configured
        if self.__RESULTYPE == '':
            raise Exception('resultype is empty, cant set result')
        # validate the data object
        if data is None:
            raise Exception('must not none of data')
        if data.GetName() != self.__RESULTYPE:
            raise Exception('router resultype different!')
        self.__DATA = data.DumpDict()
    def SetDictData(self, data):
        """Set the response payload from a plain dict."""
        if not isinstance(data, dict):
            raise Exception('data type must be dict')
        # validate the data object
        if data is None:
            raise Exception('must not none of data')
        self.__DATA = data
    def GetResult(self):
        """Get the response payload set by SetResult/SetDictData."""
        return self.__DATA
    def ImportMod(self, mod):
        """Import a module from the configured module path; *mod* may contain
        a relative sub-path like "pkg/name"."""
        path = Common.ExtendPath(Config.GetSysConfig('AppSettings', 'module_path'))
        if '/' in mod:
            modPath = mod[0:mod.rfind('/')]
            sys.path.append(path + '/' + modPath)
            mod = mod[mod.rfind('/') + 1:]
        else:
            sys.path.append(path)
        impmod = __import__(mod)
        # remove the temporary sys.path entry added above
        sys.path.pop()
        return impmod
| 23.726415 | 83 | 0.560636 |
import sys
from framework import Model, Db, Log, Config, Common
class __Agent__:
def __init__(self, resultype, mod, handler, ip):
self.__DATA = None
self.__RESULTYPE = resultype
self.__APICODE = 200
self.__MODULENAME = mod
self.__HANDLERNAME = handler
self.__REMOTE_IP = ip
def Data(self, name):
return Model.__ModelData__(name)
def Db(self):
return Db.__MysqlDb__()
def Log(self):
return Log.__Module__(self.__MODULENAME, self.__HANDLERNAME)
def __Cache(self):
pass
def GetAppConfig(self, group, name):
return Config.GetAppConfig(group, name)
def GetSysConfig(self, group, name):
return Config.GetSysConfig(group, name)
def SetApiCode(self, code):
self.__APICODE = str(code)
def GetApiCode(self):
return self.__APICODE
def GetRemoteIp(self):
return self.__REMOTE_IP
def SetResult(self, data):
if self.__RESULTYPE == '':
raise Exception('resultype is empty, cant set result')
if data is None:
raise Exception('must not none of data')
if data.GetName() != self.__RESULTYPE:
raise Exception('router resultype different!')
self.__DATA = data.DumpDict()
def SetDictData(self, data):
if not isinstance(data, dict):
raise Exception('data type must be dict')
if data is None:
raise Exception('must not none of data')
self.__DATA = data
def GetResult(self):
return self.__DATA
def ImportMod(self, mod):
path = Common.ExtendPath(Config.GetSysConfig('AppSettings', 'module_path'))
if '/' in mod:
modPath = mod[0:mod.rfind('/')]
sys.path.append(path + '/' + modPath)
mod = mod[mod.rfind('/') + 1:]
else:
sys.path.append(path)
impmod = __import__(mod)
sys.path.pop()
return impmod
| true | true |
f72844b0c4c7443bd26d04746672badc813614df | 1,070 | py | Python | wrappers/python/tests/gateway/test_get_scratchpad_status.py | vvalkonen/backend-apis | 769a45e6a90a87ab5af78e9a50ebde12f4821b99 | [
"Apache-2.0"
] | 9 | 2019-12-20T06:41:37.000Z | 2020-09-21T03:34:47.000Z | wrappers/python/tests/gateway/test_get_scratchpad_status.py | vvalkonen/backend-apis | 769a45e6a90a87ab5af78e9a50ebde12f4821b99 | [
"Apache-2.0"
] | 38 | 2019-05-09T09:55:01.000Z | 2022-01-04T10:52:46.000Z | wrappers/python/tests/gateway/test_get_scratchpad_status.py | vvalkonen/backend-apis | 769a45e6a90a87ab5af78e9a50ebde12f4821b99 | [
"Apache-2.0"
] | 13 | 2019-10-29T19:51:08.000Z | 2021-11-25T15:08:02.000Z | # flake8: noqa
import wirepas_messaging
from default_value import *
def test_generate_parse_request():
    """Round-trip a GetScratchpadStatusRequest through its payload encoding
    and check that every attribute survives."""
    request = wirepas_messaging.gateway.api.GetScratchpadStatusRequest(
        SINK_ID, REQUEST_ID
    )
    request2 = wirepas_messaging.gateway.api.GetScratchpadStatusRequest.from_payload(
        request.payload
    )
    for k, v in request.__dict__.items():
        assert v == request2.__dict__[k]
def test_generate_parse_response():
    """Round-trip a GetScratchpadStatusResponse through its payload encoding
    and check that every attribute survives (enum attributes are compared by
    value; ``enum`` is presumably star-imported via default_value)."""
    request = wirepas_messaging.gateway.api.GetScratchpadStatusResponse(
        REQUEST_ID,
        GATEWAY_ID,
        RES_OK,
        SINK_ID,
        SCRATCHPAD_INFO,
        SCRATCHPAD_STATUS,
        SCRATCHPAD_TYPE,
        SCRATCHPAD_INFO,
        FIRMWARE_AREA_ID,
    )
    request2 = wirepas_messaging.gateway.api.GetScratchpadStatusResponse.from_payload(
        request.payload
    )
    for k, v in request.__dict__.items():
        if isinstance(v, enum.Enum):
            assert v.value == request2.__dict__[k].value
        else:
            assert v == request2.__dict__[k]
| 24.883721 | 86 | 0.671028 |
import wirepas_messaging
from default_value import *
def test_generate_parse_request():
request = wirepas_messaging.gateway.api.GetScratchpadStatusRequest(
SINK_ID, REQUEST_ID
)
request2 = wirepas_messaging.gateway.api.GetScratchpadStatusRequest.from_payload(
request.payload
)
for k, v in request.__dict__.items():
assert v == request2.__dict__[k]
def test_generate_parse_response():
request = wirepas_messaging.gateway.api.GetScratchpadStatusResponse(
REQUEST_ID,
GATEWAY_ID,
RES_OK,
SINK_ID,
SCRATCHPAD_INFO,
SCRATCHPAD_STATUS,
SCRATCHPAD_TYPE,
SCRATCHPAD_INFO,
FIRMWARE_AREA_ID,
)
request2 = wirepas_messaging.gateway.api.GetScratchpadStatusResponse.from_payload(
request.payload
)
for k, v in request.__dict__.items():
if isinstance(v, enum.Enum):
assert v.value == request2.__dict__[k].value
else:
assert v == request2.__dict__[k]
| true | true |
f7284521646ce37856371178fae34fe4c113ed42 | 827 | py | Python | coverage.py | Adilet1/beta_career | 6c484bc06852b587bd4dadee27220e4aa41700c2 | [
"MIT"
] | null | null | null | coverage.py | Adilet1/beta_career | 6c484bc06852b587bd4dadee27220e4aa41700c2 | [
"MIT"
] | null | null | null | coverage.py | Adilet1/beta_career | 6c484bc06852b587bd4dadee27220e4aa41700c2 | [
"MIT"
] | null | null | null | import os
import subprocess
def run_test_coverage():
    """Run the test suite under coverage and produce the HTML report.

    Steps:
    - Runs the tests via ``coverage run -m pytest``.
    - Generates the HTML coverage report under the "htmlcov" directory.
    - Changes into "htmlcov" so a follow-up HTTP server serves the report.
    """
    py_test_command = "coverage run -m pytest"
    html_report_command = "coverage html"
    current_dir = os.path.dirname(os.path.abspath(__file__))
    try:
        subprocess.run(py_test_command.split())
        # The original code never invoked ``coverage html``, so "htmlcov"
        # was never created and the chdir below failed; generate it here.
        subprocess.run(html_report_command.split())
        coverage_dir = os.path.join(current_dir, "htmlcov")
        os.chdir(coverage_dir)
    except AttributeError:
        # subprocess.run exists only on Python >= 3.5; an old interpreter
        # (e.g. a Python 2 virtualenv) lands here.
        print("Please activate your local virtual environment and re-run this script.")
def run_http_server():
    """Serve the current working directory over HTTP on port 8080."""
    subprocess.run(["python", "-m", "http.server", "8080"])
# Script entry point: run the tests with coverage, then serve the report.
if __name__ == "__main__":
    run_test_coverage()
    run_http_server()
| 25.84375 | 87 | 0.674728 | import os
import subprocess
def run_test_coverage():
py_test_command = "coverage run -m pytest"
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
try:
subprocess.run(py_test_command.split())
coverage_dir = os.path.join(CURRENT_DIR, "htmlcov")
os.chdir(coverage_dir)
except AttributeError:
print("Please activate your local virtual environment and re-run this script.")
def run_http_server():
command = "python -m http.server 8080"
subprocess.run(command.split())
if __name__ == "__main__":
run_test_coverage()
run_http_server()
| true | true |
f72845befa374d03fd40de9ef5623b59ba01c745 | 3,238 | py | Python | downloader.py | ogenesYi/YNOTE_HELPER | d95ab22514a62d136ad32f612a88369ffc4c36f6 | [
"MIT"
] | 1 | 2022-03-16T12:46:54.000Z | 2022-03-16T12:46:54.000Z | downloader.py | ogenesYi/YNOTE_HELPER | d95ab22514a62d136ad32f612a88369ffc4c36f6 | [
"MIT"
] | 1 | 2022-03-29T05:51:49.000Z | 2022-03-29T05:51:49.000Z | downloader.py | ogenes/YNOTE_HELPER | d95ab22514a62d136ad32f612a88369ffc4c36f6 | [
"MIT"
] | 1 | 2022-02-17T13:56:14.000Z | 2022-02-17T13:56:14.000Z | import json
import os
import requests
def config():
    """Load and return the JSON configuration stored next to this script."""
    config_path = os.path.dirname(os.path.abspath(__file__)) + "/config.json"
    with open(config_path) as config_file:
        raw = config_file.read()
    return json.loads(raw)
def request(api):
    """Perform an authenticated GET against the Youdao Note API.

    Reads the two session cookies from config.json and raises when either
    one is missing (the user must copy them from a logged-in browser).
    """
    cfg = config()
    ynote_sess = cfg.get('YNOTE_SESS', '')
    ynote_login = cfg.get('YNOTE_LOGIN', '')
    if ynote_login == '' or ynote_sess == '':
        raise Exception('请先在网页端登录并复制对应Cookie值保存到config.json中,详情见 README ')
    cookie = 'YNOTE_SESS={YNOTE_SESS}; YNOTE_LOGIN={YNOTE_LOGIN}'.format(
        YNOTE_SESS=ynote_sess,
        YNOTE_LOGIN=ynote_login
    )
    return requests.get(url=api, headers={'Cookie': cookie})
def list_entire_by_parent_path(base_dir):
    """List the top-level notebook directories of the account.

    Returns a list of dicts with the remote id/name/dir flags plus the
    local base directory to mirror into; empty list on a non-200 response.
    """
    url = "https://note.youdao.com/yws/api/personal/file?method=listEntireByParentPath" \
          "&_system=macos&sev=j1&path=/&dirOnly=true&=true"
    resp = request(url)
    if resp.status_code != 200:
        return []
    entries = json.loads(resp.text)
    return [
        {
            "id": entry['fileEntry']['id'],
            "name": entry['fileEntry']['name'],
            "dir": entry['fileEntry']['dir'],
            "basedir": base_dir,
        }
        for entry in entries
    ]
def list_page_by_parent_id(parent_id, base_dir):
    """Recursively collect every note file under the given directory id.

    Sub-directories are descended into, extending *base_dir* with the
    directory name; only leaf files are returned. Empty list on a non-200
    response.
    """
    url = "https://note.youdao.com/yws/api/personal/file/{id}?all=true&f=true&len=300&sort=1" \
          "&isReverse=false&method=listPageByParentId&_system=macos&sev=j1".format(id=parent_id)
    resp = request(url)
    results = []
    if resp.status_code != 200:
        return results
    payload = json.loads(resp.text)
    for entry in payload['entries']:
        item = {
            "id": entry['fileEntry']['id'],
            "name": entry['fileEntry']['name'],
            "dir": entry['fileEntry']['dir'],
            "basedir": base_dir,
        }
        if item.get("dir", False):
            # Recurse into the sub-directory, mirroring it as a sub-path.
            results.extend(
                list_page_by_parent_id(item.get('id'), base_dir + item.get('name') + '/')
            )
        else:
            results.append(item)
    return results
def download(file_id):
    """Fetch the raw content of a single note; empty string on failure."""
    url = "https://note.youdao.com/yws/api/personal/sync?method=download&_system=macos&sev=j1" \
          "&fileId={id}&version=-1&read=true".format(id=file_id)
    resp = request(url)
    return resp.text if resp.status_code == 200 else ''
if __name__ == '__main__':
    # Local mirror root for all downloaded notes, next to this script.
    basedir = os.path.dirname(os.path.abspath(__file__)) + '/note/'
    if not os.path.exists(basedir):
        os.makedirs(basedir, 0o755)
    try:
        # Enumerate top-level notebooks, then every note file beneath them.
        parents = list_entire_by_parent_path(basedir)
        docs = []
        for _ in parents:
            parent_dir = _['basedir'] + _['name'] + '/'
            tmp_docs = list_page_by_parent_id(_['id'], parent_dir)
            docs += tmp_docs
            # Progress: notebook path and how many files it contains.
            print('%s: %d' % (parent_dir, len(tmp_docs)))
        # Download each note into its mirrored directory.
        for _ in docs:
            doc_dir = _['basedir']
            if not os.path.exists(doc_dir):
                os.makedirs(doc_dir, 0o755)
            filename = doc_dir + _['name']
            print(filename)
            content = download(_['id'])
            with open(filename, 'w') as f:
                f.write(content)
        print('Over !')
    except Exception as err:
        # Best-effort script: report the failure instead of a traceback.
        print(err)
| 30.261682 | 96 | 0.556516 | import json
import os
import requests
def config():
with open(os.path.dirname(os.path.abspath(__file__)) + "/config.json") as config_file:
data = config_file.read()
return json.loads(data)
def request(api):
ynote_sess = config().get('YNOTE_SESS', '')
ynote_login = config().get('YNOTE_LOGIN', '')
if ynote_login == '' or ynote_sess == '':
raise Exception('请先在网页端登录并复制对应Cookie值保存到config.json中,详情见 README ')
http_header = {
'Cookie': 'YNOTE_SESS={YNOTE_SESS}; YNOTE_LOGIN={YNOTE_LOGIN}'.format(
YNOTE_SESS=ynote_sess,
YNOTE_LOGIN=ynote_login
)
}
return requests.get(url=api, headers=http_header)
def list_entire_by_parent_path(base_dir):
url = "https://note.youdao.com/yws/api/personal/file?method=listEntireByParentPath" \
"&_system=macos&sev=j1&path=/&dirOnly=true&=true"
resp = request(url)
ret = []
if resp.status_code == 200:
text = json.loads(resp.text)
for _ in text:
ret.append({
"id": _['fileEntry']['id'],
"name": _['fileEntry']['name'],
"dir": _['fileEntry']['dir'],
"basedir": base_dir,
})
return ret
def list_page_by_parent_id(parent_id, base_dir):
url = "https://note.youdao.com/yws/api/personal/file/{id}?all=true&f=true&len=300&sort=1" \
"&isReverse=false&method=listPageByParentId&_system=macos&sev=j1".format(id=parent_id)
resp = request(url)
ret = []
if resp.status_code == 200:
text = json.loads(resp.text)
for _ in text['entries']:
tmp = {
"id": _['fileEntry']['id'],
"name": _['fileEntry']['name'],
"dir": _['fileEntry']['dir'],
"basedir": base_dir,
}
if tmp.get("dir", False):
ret += list_page_by_parent_id(tmp.get('id'), base_dir + tmp.get('name') + '/')
else:
ret.append(tmp)
return ret
def download(file_id):
url = "https://note.youdao.com/yws/api/personal/sync?method=download&_system=macos&sev=j1" \
"&fileId={id}&version=-1&read=true".format(id=file_id)
resp = request(url)
if resp.status_code == 200:
ret = resp.text
else:
ret = ''
return ret
if __name__ == '__main__':
basedir = os.path.dirname(os.path.abspath(__file__)) + '/note/'
if not os.path.exists(basedir):
os.makedirs(basedir, 0o755)
try:
parents = list_entire_by_parent_path(basedir)
docs = []
for _ in parents:
parent_dir = _['basedir'] + _['name'] + '/'
tmp_docs = list_page_by_parent_id(_['id'], parent_dir)
docs += tmp_docs
print('%s: %d' % (parent_dir, len(tmp_docs)))
for _ in docs:
doc_dir = _['basedir']
if not os.path.exists(doc_dir):
os.makedirs(doc_dir, 0o755)
filename = doc_dir + _['name']
print(filename)
content = download(_['id'])
with open(filename, 'w') as f:
f.write(content)
print('Over !')
except Exception as err:
print(err)
| true | true |
f72847d7c434145ce28957ed9f6f7e61c72ff185 | 20,788 | bzl | Python | third_party/repositories/scala_2_11.bzl | urianchang/rules_scala | 70a97425774a59a282853922136f7403ad9c8069 | [
"Apache-2.0"
] | null | null | null | third_party/repositories/scala_2_11.bzl | urianchang/rules_scala | 70a97425774a59a282853922136f7403ad9c8069 | [
"Apache-2.0"
] | null | null | null | third_party/repositories/scala_2_11.bzl | urianchang/rules_scala | 70a97425774a59a282853922136f7403ad9c8069 | [
"Apache-2.0"
] | null | null | null | artifacts = {
"io_bazel_rules_scala_scala_library": {
"artifact": "org.scala-lang:scala-library:2.11.12",
"sha256": "0b3d6fd42958ee98715ba2ec5fe221f4ca1e694d7c981b0ae0cd68e97baf6dce",
},
"io_bazel_rules_scala_scala_compiler": {
"artifact": "org.scala-lang:scala-compiler:2.11.12",
"sha256": "3e892546b72ab547cb77de4d840bcfd05c853e73390fed7370a8f19acb0735a0",
},
"io_bazel_rules_scala_scala_reflect": {
"artifact": "org.scala-lang:scala-reflect:2.11.12",
"sha256": "6ba385b450a6311a15c918cf8688b9af9327c6104f0ecbd35933cfcd3095fe04",
},
"io_bazel_rules_scala_scalatest": {
"artifact": "org.scalatest:scalatest_2.11:3.1.2",
"sha256": "5a61de4a55b9bd1ce2b2936200c4d5b0b05d96ac9727d361ee37f7a5add5d86a",
},
"io_bazel_rules_scala_scalactic": {
"artifact": "org.scalactic:scalactic_2.11:3.1.2",
"sha256": "60642da4dcfa1e1fae02c394e9d8a1ce4c08f1b189bae86b8f3809310c12c29b",
},
"io_bazel_rules_scala_scala_xml": {
"artifact": "org.scala-lang.modules:scala-xml_2.11:1.2.0",
"sha256": "eaddac168ef1e28978af768706490fa4358323a08964c25fa1027c52238e3702",
},
"io_bazel_rules_scala_scala_parser_combinators": {
"artifact": "org.scala-lang.modules:scala-parser-combinators_2.11:1.1.2",
"sha256": "3e0889e95f5324da6420461f7147cb508241ed957ac5cfedc25eef19c5448f26",
},
"org_scalameta_common": {
"artifact": "org.scalameta:common_2.11:4.3.0",
"sha256": "6330798bcbd78d14d371202749f32efda0465c3be5fd057a6055a67e21335ba0",
"deps": [
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
],
},
"org_scalameta_fastparse": {
"artifact": "org.scalameta:fastparse_2.11:1.0.1",
"sha256": "49ecc30a4b47efc0038099da0c97515cf8f754ea631ea9f9935b36ca7d41b733",
"deps": [
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
"@org_scalameta_fastparse_utils",
],
},
"org_scalameta_fastparse_utils": {
"artifact": "org.scalameta:fastparse-utils_2.11:1.0.1",
"sha256": "93f58db540e53178a686621f7a9c401307a529b68e051e38804394a2a86cea94",
"deps": [
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
],
},
"org_scala_lang_modules_scala_collection_compat": {
"artifact": "org.scala-lang.modules:scala-collection-compat_2.11:2.1.2",
"sha256": "e9667b8b7276aeb42599f536fe4d7caab06eabc55e9995572267ad60c7a11c8b",
"deps": [
"@io_bazel_rules_scala_scala_library",
],
},
"org_scalameta_parsers": {
"artifact": "org.scalameta:parsers_2.11:4.3.0",
"sha256": "724382abfac27b32dec6c21210562bc7e1b09b5268ccb704abe66dcc8844beeb",
"deps": [
"@io_bazel_rules_scala_scala_library",
"@org_scalameta_trees",
],
},
"org_scalameta_scalafmt_core": {
"artifact": "org.scalameta:scalafmt-core_2.11:2.3.2",
"sha256": "6bf391e0e1d7369fda83ddaf7be4d267bf4cbccdf2cc31ff941999a78c30e67f",
"deps": [
"@com_geirsson_metaconfig_core",
"@com_geirsson_metaconfig_typesafe_config",
"@io_bazel_rules_scala_scala_library",
"@io_bazel_rules_scala_scala_reflect",
"@org_scalameta_scalameta",
"@org_scala_lang_modules_scala_collection_compat",
],
},
"org_scalameta_scalameta": {
"artifact": "org.scalameta:scalameta_2.11:4.3.0",
"sha256": "94fe739295447cd3ae877c279ccde1def06baea02d9c76a504dda23de1d90516",
"deps": [
"@io_bazel_rules_scala_scala_library",
"@org_scala_lang_scalap",
"@org_scalameta_parsers",
],
},
"org_scalameta_trees": {
"artifact": "org.scalameta:trees_2.11:4.3.0",
"sha256": "d24d5d63d8deafe646d455c822593a66adc6fdf17c8373754a3834a6e92a8a72",
"deps": [
"@com_thesamet_scalapb_scalapb_runtime",
"@io_bazel_rules_scala_scala_library",
"@org_scalameta_common",
"@org_scalameta_fastparse",
],
},
"org_typelevel_paiges_core": {
"artifact": "org.typelevel:paiges-core_2.11:0.2.4",
"sha256": "aa66fbe0457ca5cb5b9e522d4cb873623bb376a2e1ff58c464b5194c1d87c241",
"deps": [
"@io_bazel_rules_scala_scala_library",
],
},
"com_typesafe_config": {
"artifact": "com.typesafe:config:1.3.3",
"sha256": "b5f1d6071f1548d05be82f59f9039c7d37a1787bd8e3c677e31ee275af4a4621",
},
"org_scala_lang_scalap": {
"artifact": "org.scala-lang:scalap:2.11.12",
"sha256": "a6dd7203ce4af9d6185023d5dba9993eb8e80584ff4b1f6dec574a2aba4cd2b7",
"deps": [
"@io_bazel_rules_scala_scala_compiler",
],
},
"com_thesamet_scalapb_lenses": {
"artifact": "com.thesamet.scalapb:lenses_2.11:0.9.0",
"sha256": "f4809760edee6abc97a7fe9b7fd6ae5fe1006795b1dc3963ab4e317a72f1a385",
"deps": [
"@io_bazel_rules_scala_scala_library",
],
},
"com_thesamet_scalapb_scalapb_runtime": {
"artifact": "com.thesamet.scalapb:scalapb-runtime_2.11:0.9.0",
"sha256": "ab1e449a18a9ce411eb3fec31bdbca5dd5fae4475b1557bb5e235a7b54738757",
"deps": [
"@com_google_protobuf_protobuf_java",
"@com_lihaoyi_fastparse",
"@com_thesamet_scalapb_lenses",
"@io_bazel_rules_scala_scala_library",
],
},
"com_lihaoyi_fansi": {
"artifact": "com.lihaoyi:fansi_2.11:0.2.5",
"sha256": "1ff0a8304f322c1442e6bcf28fab07abf3cf560dd24573dbe671249aee5fc488",
"deps": [
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
],
},
"com_lihaoyi_fastparse": {
"artifact": "com.lihaoyi:fastparse_2.11:2.1.2",
"sha256": "5c5d81f90ada03ac5b21b161864a52558133951031ee5f6bf4d979e8baa03628",
"deps": [
"@com_lihaoyi_sourcecode",
],
},
"com_lihaoyi_pprint": {
"artifact": "com.lihaoyi:pprint_2.11:0.5.3",
"sha256": "fb5e4921e7dff734d049e752a482d3a031380d3eea5caa76c991312dee9e6991",
"deps": [
"@com_lihaoyi_fansi",
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
],
},
"com_lihaoyi_sourcecode": {
"artifact": "com.lihaoyi:sourcecode_2.11:0.1.7",
"sha256": "33516d7fd9411f74f05acfd5274e1b1889b7841d1993736118803fc727b2d5fc",
"deps": [
"@io_bazel_rules_scala_scala_library",
],
},
"com_google_protobuf_protobuf_java": {
"artifact": "com.google.protobuf:protobuf-java:3.10.0",
"sha256": "161d7d61a8cb3970891c299578702fd079646e032329d6c2cabf998d191437c9",
},
"com_geirsson_metaconfig_core": {
"artifact": "com.geirsson:metaconfig-core_2.11:0.9.4",
"sha256": "5d5704a1f1c4f74aed26248eeb9b577274d570b167cec0bf51d2908609c29118",
"deps": [
"@com_lihaoyi_pprint",
"@io_bazel_rules_scala_scala_library",
"@org_typelevel_paiges_core",
"@org_scala_lang_modules_scala_collection_compat",
],
},
"com_geirsson_metaconfig_typesafe_config": {
"artifact": "com.geirsson:metaconfig-typesafe-config_2.11:0.9.4",
"sha256": "52d2913640f4592402aeb2f0cec5004893d02acf26df4aa1cf8d4dcb0d2b21c7",
"deps": [
"@com_geirsson_metaconfig_core",
"@com_typesafe_config",
"@io_bazel_rules_scala_scala_library",
"@org_scala_lang_modules_scala_collection_compat",
],
},
"io_bazel_rules_scala_org_openjdk_jmh_jmh_core": {
"artifact": "org.openjdk.jmh:jmh-core:1.20",
"sha256": "1688db5110ea6413bf63662113ed38084106ab1149e020c58c5ac22b91b842ca",
},
"io_bazel_rules_scala_org_openjdk_jmh_jmh_generator_asm": {
"artifact": "org.openjdk.jmh:jmh-generator-asm:1.20",
"sha256": "2dd4798b0c9120326310cda3864cc2e0035b8476346713d54a28d1adab1414a5",
},
"io_bazel_rules_scala_org_openjdk_jmh_jmh_generator_reflection": {
"artifact": "org.openjdk.jmh:jmh-generator-reflection:1.20",
"sha256": "57706f7c8278272594a9afc42753aaf9ba0ba05980bae0673b8195908d21204e",
},
"io_bazel_rules_scala_org_ows2_asm_asm": {
"artifact": "org.ow2.asm:asm:6.1.1",
"sha256": "dd3b546415dd4bade2ebe3b47c7828ab0623ee2336604068e2d81023f9f8d833",
},
"io_bazel_rules_scala_net_sf_jopt_simple_jopt_simple": {
"artifact": "net.sf.jopt-simple:jopt-simple:4.6",
"sha256": "3fcfbe3203c2ea521bf7640484fd35d6303186ea2e08e72f032d640ca067ffda",
},
"io_bazel_rules_scala_org_apache_commons_commons_math3": {
"artifact": "org.apache.commons:commons-math3:3.6.1",
"sha256": "1e56d7b058d28b65abd256b8458e3885b674c1d588fa43cd7d1cbb9c7ef2b308",
},
"io_bazel_rules_scala_junit_junit": {
"artifact": "junit:junit:4.12",
"sha256": "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
},
"io_bazel_rules_scala_org_hamcrest_hamcrest_core": {
"artifact": "org.hamcrest:hamcrest-core:1.3",
"sha256": "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
},
"io_bazel_rules_scala_org_specs2_specs2_common": {
"artifact": "org.specs2:specs2-common_2.11:4.4.1",
"sha256": "52d7c0da58725606e98c6e8c81d2efe632053520a25da9140116d04a4abf9d2c",
"deps": [
"@io_bazel_rules_scala_org_specs2_specs2_fp",
],
},
"io_bazel_rules_scala_org_specs2_specs2_core": {
"artifact": "org.specs2:specs2-core_2.11:4.4.1",
"sha256": "8e95cb7e347e7a87e7a80466cbd88419ece1aaacb35c32e8bd7d299a623b31b9",
"deps": [
"@io_bazel_rules_scala_org_specs2_specs2_common",
"@io_bazel_rules_scala_org_specs2_specs2_matcher",
],
},
"io_bazel_rules_scala_org_specs2_specs2_fp": {
"artifact": "org.specs2:specs2-fp_2.11:4.4.1",
"sha256": "e43006fdd0726ffcd1e04c6c4d795176f5f765cc787cc09baebe1fcb009e4462",
},
"io_bazel_rules_scala_org_specs2_specs2_matcher": {
"artifact": "org.specs2:specs2-matcher_2.11:4.4.1",
"sha256": "448e5ab89d4d650d23030fdbee66a010a07dcac5e4c3e73ef5fe39ca1aace1cd",
"deps": [
"@io_bazel_rules_scala_org_specs2_specs2_common",
],
},
"io_bazel_rules_scala_org_specs2_specs2_junit": {
"artifact": "org.specs2:specs2-junit_2.11:4.4.1",
"sha256": "a8549d52e87896624200fe35ef7b841c1c698a8fb5d97d29bf082762aea9bb72",
"deps": [
"@io_bazel_rules_scala_org_specs2_specs2_core",
],
},
"scala_proto_rules_scalapb_plugin": {
"artifact": "com.thesamet.scalapb:compilerplugin_2.11:0.9.7",
"sha256": "2d6793fa2565953ef2b5094fc37fae4933f3c42e4cb4048d54e7f358ec104a87",
},
"scala_proto_rules_protoc_bridge": {
"artifact": "com.thesamet.scalapb:protoc-bridge_2.11:0.7.14",
"sha256": "314e34bf331b10758ff7a780560c8b5a5b09e057695a643e33ab548e3d94aa03",
},
"scala_proto_rules_scalapb_runtime": {
"artifact": "com.thesamet.scalapb:scalapb-runtime_2.11:0.9.7",
"sha256": "5131033e9536727891a38004ec707a93af1166cb8283c7db711c2c105fbf289e",
},
"scala_proto_rules_scalapb_runtime_grpc": {
"artifact": "com.thesamet.scalapb:scalapb-runtime-grpc_2.11:0.9.7",
"sha256": "24d19df500ce6450d8f7aa72a9bad675fa4f3650f7736d548aa714058f887e23",
},
"scala_proto_rules_scalapb_lenses": {
"artifact": "com.thesamet.scalapb:lenses_2.11:0.9.7",
"sha256": "f8e3b526ceac998652b296014e9ab4c0ab906a40837dd1dfcf6948b6f5a1a8bf",
},
"scala_proto_rules_scalapb_fastparse": {
"artifact": "com.lihaoyi:fastparse_2.11:2.1.2",
"sha256": "5c5d81f90ada03ac5b21b161864a52558133951031ee5f6bf4d979e8baa03628",
},
"scala_proto_rules_grpc_core": {
"artifact": "io.grpc:grpc-core:1.24.0",
"sha256": "8fc900625a9330b1c155b5423844d21be0a5574fe218a63170a16796c6f7880e",
},
"scala_proto_rules_grpc_api": {
"artifact": "io.grpc:grpc-api:1.24.0",
"sha256": "553978366e04ee8ddba64afde3b3cf2ac021a2f3c2db2831b6491d742b558598",
},
"scala_proto_rules_grpc_stub": {
"artifact": "io.grpc:grpc-stub:1.24.0",
"sha256": "eaa9201896a77a0822e26621b538c7154f00441a51c9b14dc9e1ec1f2acfb815",
},
"scala_proto_rules_grpc_protobuf": {
"artifact": "io.grpc:grpc-protobuf:1.24.0",
"sha256": "88cd0838ea32893d92cb214ea58908351854ed8de7730be07d5f7d19025dd0bc",
},
"scala_proto_rules_grpc_netty": {
"artifact": "io.grpc:grpc-netty:1.24.0",
"sha256": "8478333706ba442a354c2ddb8832d80a5aef71016e8a9cf07e7bf6e8c298f042",
},
"scala_proto_rules_grpc_context": {
"artifact": "io.grpc:grpc-context:1.24.0",
"sha256": "1f0546e18789f7445d1c5a157010a11bc038bbb31544cdb60d9da3848efcfeea",
},
"scala_proto_rules_perfmark_api": {
"artifact": "io.perfmark:perfmark-api:0.17.0",
"sha256": "816c11409b8a0c6c9ce1cda14bed526e7b4da0e772da67c5b7b88eefd41520f9",
},
"scala_proto_rules_guava": {
"artifact": "com.google.guava:guava:26.0-android",
"sha256": "1d044ebb866ef08b7d04e998b4260c9b52fab6e6d6b68d207859486bb3686cd5",
},
"scala_proto_rules_google_instrumentation": {
"artifact": "com.google.instrumentation:instrumentation-api:0.3.0",
"sha256": "671f7147487877f606af2c7e39399c8d178c492982827305d3b1c7f5b04f1145",
},
"scala_proto_rules_netty_codec": {
"artifact": "io.netty:netty-codec:4.1.32.Final",
"sha256": "dbd6cea7d7bf5a2604e87337cb67c9468730d599be56511ed0979aacb309f879",
},
"scala_proto_rules_netty_codec_http": {
"artifact": "io.netty:netty-codec-http:4.1.32.Final",
"sha256": "db2c22744f6a4950d1817e4e1a26692e53052c5d54abe6cceecd7df33f4eaac3",
},
"scala_proto_rules_netty_codec_socks": {
"artifact": "io.netty:netty-codec-socks:4.1.32.Final",
"sha256": "fe2f2e97d6c65dc280623dcfd24337d8a5c7377049c120842f2c59fb83d7408a",
},
"scala_proto_rules_netty_codec_http2": {
"artifact": "io.netty:netty-codec-http2:4.1.32.Final",
"sha256": "4d4c6cfc1f19efb969b9b0ae6cc977462d202867f7dcfee6e9069977e623a2f5",
},
"scala_proto_rules_netty_handler": {
"artifact": "io.netty:netty-handler:4.1.32.Final",
"sha256": "07d9756e48b5f6edc756e33e8b848fb27ff0b1ae087dab5addca6c6bf17cac2d",
},
"scala_proto_rules_netty_buffer": {
"artifact": "io.netty:netty-buffer:4.1.32.Final",
"sha256": "8ac0e30048636bd79ae205c4f9f5d7544290abd3a7ed39d8b6d97dfe3795afc1",
},
"scala_proto_rules_netty_transport": {
"artifact": "io.netty:netty-transport:4.1.32.Final",
"sha256": "175bae0d227d7932c0c965c983efbb3cf01f39abe934f5c4071d0319784715fb",
},
"scala_proto_rules_netty_resolver": {
"artifact": "io.netty:netty-resolver:4.1.32.Final",
"sha256": "9b4a19982047a95ea4791a7ad7ad385c7a08c2ac75f0a3509cc213cb32a726ae",
},
"scala_proto_rules_netty_common": {
"artifact": "io.netty:netty-common:4.1.32.Final",
"sha256": "cc993e660f8f8e3b033f1d25a9e2f70151666bdf878d460a6508cb23daa696dc",
},
"scala_proto_rules_netty_handler_proxy": {
"artifact": "io.netty:netty-handler-proxy:4.1.32.Final",
"sha256": "10d1081ed114bb0e76ebbb5331b66a6c3189cbdefdba232733fc9ca308a6ea34",
},
"scala_proto_rules_opencensus_api": {
"artifact": "io.opencensus:opencensus-api:0.22.1",
"sha256": "62a0503ee81856ba66e3cde65dee3132facb723a4fa5191609c84ce4cad36127",
},
"scala_proto_rules_opencensus_impl": {
"artifact": "io.opencensus:opencensus-impl:0.22.1",
"sha256": "9e8b209da08d1f5db2b355e781b9b969b2e0dab934cc806e33f1ab3baed4f25a",
},
"scala_proto_rules_disruptor": {
"artifact": "com.lmax:disruptor:3.4.2",
"sha256": "f412ecbb235c2460b45e63584109723dea8d94b819c78c9bfc38f50cba8546c0",
},
"scala_proto_rules_opencensus_impl_core": {
"artifact": "io.opencensus:opencensus-impl-core:0.22.1",
"sha256": "04607d100e34bacdb38f93c571c5b7c642a1a6d873191e25d49899668514db68",
},
"scala_proto_rules_opencensus_contrib_grpc_metrics": {
"artifact": "io.opencensus:opencensus-contrib-grpc-metrics:0.22.1",
"sha256": "3f6f4d5bd332c516282583a01a7c940702608a49ed6e62eb87ef3b1d320d144b",
},
"io_bazel_rules_scala_org_tpolecat_tut_core": {
"artifact": "org.tpolecat:tut-core_2.11:0.6.13",
"sha256": "7f89f9858713e9089d47389e66a2184303bf3582719e4ab313d83dccd5ed2fe9",
},
"io_bazel_rules_scala_mustache": {
"artifact": "com.github.spullara.mustache.java:compiler:0.8.18",
"sha256": "ddabc1ef897fd72319a761d29525fd61be57dc25d04d825f863f83cc89000e66",
},
"io_bazel_rules_scala_guava": {
"artifact": "com.google.guava:guava:21.0",
"sha256": "972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480",
},
"libthrift": {
"artifact": "org.apache.thrift:libthrift:0.10.0",
"sha256": "8591718c1884ac8001b4c5ca80f349c0a6deec691de0af720c5e3bc3a581dada",
},
"io_bazel_rules_scala_scrooge_core": {
"artifact": "com.twitter:scrooge-core_2.11:21.2.0",
"sha256": "d6cef1408e34b9989ea8bc4c567dac922db6248baffe2eeaa618a5b354edd2bb",
},
"io_bazel_rules_scala_scrooge_generator": {
"artifact": "com.twitter:scrooge-generator_2.11:21.2.0",
"sha256": "87094f01df2c0670063ab6ebe156bb1a1bcdabeb95bc45552660b030287d6acb",
"runtime_deps": [
"@io_bazel_rules_scala_guava",
"@io_bazel_rules_scala_mustache",
],
},
"io_bazel_rules_scala_util_core": {
"artifact": "com.twitter:util-core_2.11:21.2.0",
"sha256": "31c33d494ca5a877c1e5b5c1f569341e1d36e7b2c8b3fb0356fb2b6d4a3907ca",
},
"io_bazel_rules_scala_util_logging": {
"artifact": "com.twitter:util-logging_2.11:21.2.0",
"sha256": "f3b62465963fbf0fe9860036e6255337996bb48a1a3f21a29503a2750d34f319",
},
"io_bazel_rules_scala_javax_annotation_api": {
"artifact": "javax.annotation:javax.annotation-api:1.3.2",
"sha256": "e04ba5195bcd555dc95650f7cc614d151e4bcd52d29a10b8aa2197f3ab89ab9b",
},
# test only
"com_twitter__scalding_date": {
"testonly": True,
"artifact": "com.twitter:scalding-date_2.11:0.17.0",
"sha256": "bf743cd6d224a4568d6486a2b794143e23145d2afd7a1d2de412d49e45bdb308",
},
"org_typelevel__cats_core": {
"testonly": True,
"artifact": "org.typelevel:cats-core_2.11:0.9.0",
"sha256": "3fda7a27114b0d178107ace5c2cf04e91e9951810690421768e65038999ffca5",
},
"com_google_guava_guava_21_0_with_file": {
"testonly": True,
"artifact": "com.google.guava:guava:21.0",
"sha256": "972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480",
},
"com_github_jnr_jffi_native": {
"testonly": True,
"artifact": "com.github.jnr:jffi:jar:native:1.2.17",
"sha256": "4eb582bc99d96c8df92fc6f0f608fd123d278223982555ba16219bf8be9f75a9",
},
"org_apache_commons_commons_lang_3_5": {
"testonly": True,
"artifact": "org.apache.commons:commons-lang3:3.5",
"sha256": "8ac96fc686512d777fca85e144f196cd7cfe0c0aec23127229497d1a38ff651c",
},
"org_springframework_spring_core": {
"testonly": True,
"artifact": "org.springframework:spring-core:5.1.5.RELEASE",
"sha256": "f771b605019eb9d2cf8f60c25c050233e39487ff54d74c93d687ea8de8b7285a",
},
"org_springframework_spring_tx": {
"testonly": True,
"artifact": "org.springframework:spring-tx:5.1.5.RELEASE",
"sha256": "666f72b73c7e6b34e5bb92a0d77a14cdeef491c00fcb07a1e89eb62b08500135",
"deps": [
"@org_springframework_spring_core",
],
},
"com_google_guava_guava_21_0": {
"testonly": True,
"artifact": "com.google.guava:guava:21.0",
"sha256": "972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480",
"deps": [
"@org_springframework_spring_core",
],
},
"org_spire_math_kind_projector": {
"testonly": True,
"artifact": "org.spire-math:kind-projector_2.11:0.9.10",
"sha256": "897460d4488b7dd6ac9198937d6417b36cc6ec8ab3693fdf2c532652f26c4373",
},
}
| 44.229787 | 85 | 0.682894 | artifacts = {
"io_bazel_rules_scala_scala_library": {
"artifact": "org.scala-lang:scala-library:2.11.12",
"sha256": "0b3d6fd42958ee98715ba2ec5fe221f4ca1e694d7c981b0ae0cd68e97baf6dce",
},
"io_bazel_rules_scala_scala_compiler": {
"artifact": "org.scala-lang:scala-compiler:2.11.12",
"sha256": "3e892546b72ab547cb77de4d840bcfd05c853e73390fed7370a8f19acb0735a0",
},
"io_bazel_rules_scala_scala_reflect": {
"artifact": "org.scala-lang:scala-reflect:2.11.12",
"sha256": "6ba385b450a6311a15c918cf8688b9af9327c6104f0ecbd35933cfcd3095fe04",
},
"io_bazel_rules_scala_scalatest": {
"artifact": "org.scalatest:scalatest_2.11:3.1.2",
"sha256": "5a61de4a55b9bd1ce2b2936200c4d5b0b05d96ac9727d361ee37f7a5add5d86a",
},
"io_bazel_rules_scala_scalactic": {
"artifact": "org.scalactic:scalactic_2.11:3.1.2",
"sha256": "60642da4dcfa1e1fae02c394e9d8a1ce4c08f1b189bae86b8f3809310c12c29b",
},
"io_bazel_rules_scala_scala_xml": {
"artifact": "org.scala-lang.modules:scala-xml_2.11:1.2.0",
"sha256": "eaddac168ef1e28978af768706490fa4358323a08964c25fa1027c52238e3702",
},
"io_bazel_rules_scala_scala_parser_combinators": {
"artifact": "org.scala-lang.modules:scala-parser-combinators_2.11:1.1.2",
"sha256": "3e0889e95f5324da6420461f7147cb508241ed957ac5cfedc25eef19c5448f26",
},
"org_scalameta_common": {
"artifact": "org.scalameta:common_2.11:4.3.0",
"sha256": "6330798bcbd78d14d371202749f32efda0465c3be5fd057a6055a67e21335ba0",
"deps": [
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
],
},
"org_scalameta_fastparse": {
"artifact": "org.scalameta:fastparse_2.11:1.0.1",
"sha256": "49ecc30a4b47efc0038099da0c97515cf8f754ea631ea9f9935b36ca7d41b733",
"deps": [
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
"@org_scalameta_fastparse_utils",
],
},
"org_scalameta_fastparse_utils": {
"artifact": "org.scalameta:fastparse-utils_2.11:1.0.1",
"sha256": "93f58db540e53178a686621f7a9c401307a529b68e051e38804394a2a86cea94",
"deps": [
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
],
},
"org_scala_lang_modules_scala_collection_compat": {
"artifact": "org.scala-lang.modules:scala-collection-compat_2.11:2.1.2",
"sha256": "e9667b8b7276aeb42599f536fe4d7caab06eabc55e9995572267ad60c7a11c8b",
"deps": [
"@io_bazel_rules_scala_scala_library",
],
},
"org_scalameta_parsers": {
"artifact": "org.scalameta:parsers_2.11:4.3.0",
"sha256": "724382abfac27b32dec6c21210562bc7e1b09b5268ccb704abe66dcc8844beeb",
"deps": [
"@io_bazel_rules_scala_scala_library",
"@org_scalameta_trees",
],
},
"org_scalameta_scalafmt_core": {
"artifact": "org.scalameta:scalafmt-core_2.11:2.3.2",
"sha256": "6bf391e0e1d7369fda83ddaf7be4d267bf4cbccdf2cc31ff941999a78c30e67f",
"deps": [
"@com_geirsson_metaconfig_core",
"@com_geirsson_metaconfig_typesafe_config",
"@io_bazel_rules_scala_scala_library",
"@io_bazel_rules_scala_scala_reflect",
"@org_scalameta_scalameta",
"@org_scala_lang_modules_scala_collection_compat",
],
},
"org_scalameta_scalameta": {
"artifact": "org.scalameta:scalameta_2.11:4.3.0",
"sha256": "94fe739295447cd3ae877c279ccde1def06baea02d9c76a504dda23de1d90516",
"deps": [
"@io_bazel_rules_scala_scala_library",
"@org_scala_lang_scalap",
"@org_scalameta_parsers",
],
},
"org_scalameta_trees": {
"artifact": "org.scalameta:trees_2.11:4.3.0",
"sha256": "d24d5d63d8deafe646d455c822593a66adc6fdf17c8373754a3834a6e92a8a72",
"deps": [
"@com_thesamet_scalapb_scalapb_runtime",
"@io_bazel_rules_scala_scala_library",
"@org_scalameta_common",
"@org_scalameta_fastparse",
],
},
"org_typelevel_paiges_core": {
"artifact": "org.typelevel:paiges-core_2.11:0.2.4",
"sha256": "aa66fbe0457ca5cb5b9e522d4cb873623bb376a2e1ff58c464b5194c1d87c241",
"deps": [
"@io_bazel_rules_scala_scala_library",
],
},
"com_typesafe_config": {
"artifact": "com.typesafe:config:1.3.3",
"sha256": "b5f1d6071f1548d05be82f59f9039c7d37a1787bd8e3c677e31ee275af4a4621",
},
"org_scala_lang_scalap": {
"artifact": "org.scala-lang:scalap:2.11.12",
"sha256": "a6dd7203ce4af9d6185023d5dba9993eb8e80584ff4b1f6dec574a2aba4cd2b7",
"deps": [
"@io_bazel_rules_scala_scala_compiler",
],
},
"com_thesamet_scalapb_lenses": {
"artifact": "com.thesamet.scalapb:lenses_2.11:0.9.0",
"sha256": "f4809760edee6abc97a7fe9b7fd6ae5fe1006795b1dc3963ab4e317a72f1a385",
"deps": [
"@io_bazel_rules_scala_scala_library",
],
},
"com_thesamet_scalapb_scalapb_runtime": {
"artifact": "com.thesamet.scalapb:scalapb-runtime_2.11:0.9.0",
"sha256": "ab1e449a18a9ce411eb3fec31bdbca5dd5fae4475b1557bb5e235a7b54738757",
"deps": [
"@com_google_protobuf_protobuf_java",
"@com_lihaoyi_fastparse",
"@com_thesamet_scalapb_lenses",
"@io_bazel_rules_scala_scala_library",
],
},
"com_lihaoyi_fansi": {
"artifact": "com.lihaoyi:fansi_2.11:0.2.5",
"sha256": "1ff0a8304f322c1442e6bcf28fab07abf3cf560dd24573dbe671249aee5fc488",
"deps": [
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
],
},
"com_lihaoyi_fastparse": {
"artifact": "com.lihaoyi:fastparse_2.11:2.1.2",
"sha256": "5c5d81f90ada03ac5b21b161864a52558133951031ee5f6bf4d979e8baa03628",
"deps": [
"@com_lihaoyi_sourcecode",
],
},
"com_lihaoyi_pprint": {
"artifact": "com.lihaoyi:pprint_2.11:0.5.3",
"sha256": "fb5e4921e7dff734d049e752a482d3a031380d3eea5caa76c991312dee9e6991",
"deps": [
"@com_lihaoyi_fansi",
"@com_lihaoyi_sourcecode",
"@io_bazel_rules_scala_scala_library",
],
},
"com_lihaoyi_sourcecode": {
"artifact": "com.lihaoyi:sourcecode_2.11:0.1.7",
"sha256": "33516d7fd9411f74f05acfd5274e1b1889b7841d1993736118803fc727b2d5fc",
"deps": [
"@io_bazel_rules_scala_scala_library",
],
},
"com_google_protobuf_protobuf_java": {
"artifact": "com.google.protobuf:protobuf-java:3.10.0",
"sha256": "161d7d61a8cb3970891c299578702fd079646e032329d6c2cabf998d191437c9",
},
"com_geirsson_metaconfig_core": {
"artifact": "com.geirsson:metaconfig-core_2.11:0.9.4",
"sha256": "5d5704a1f1c4f74aed26248eeb9b577274d570b167cec0bf51d2908609c29118",
"deps": [
"@com_lihaoyi_pprint",
"@io_bazel_rules_scala_scala_library",
"@org_typelevel_paiges_core",
"@org_scala_lang_modules_scala_collection_compat",
],
},
"com_geirsson_metaconfig_typesafe_config": {
"artifact": "com.geirsson:metaconfig-typesafe-config_2.11:0.9.4",
"sha256": "52d2913640f4592402aeb2f0cec5004893d02acf26df4aa1cf8d4dcb0d2b21c7",
"deps": [
"@com_geirsson_metaconfig_core",
"@com_typesafe_config",
"@io_bazel_rules_scala_scala_library",
"@org_scala_lang_modules_scala_collection_compat",
],
},
"io_bazel_rules_scala_org_openjdk_jmh_jmh_core": {
"artifact": "org.openjdk.jmh:jmh-core:1.20",
"sha256": "1688db5110ea6413bf63662113ed38084106ab1149e020c58c5ac22b91b842ca",
},
"io_bazel_rules_scala_org_openjdk_jmh_jmh_generator_asm": {
"artifact": "org.openjdk.jmh:jmh-generator-asm:1.20",
"sha256": "2dd4798b0c9120326310cda3864cc2e0035b8476346713d54a28d1adab1414a5",
},
"io_bazel_rules_scala_org_openjdk_jmh_jmh_generator_reflection": {
"artifact": "org.openjdk.jmh:jmh-generator-reflection:1.20",
"sha256": "57706f7c8278272594a9afc42753aaf9ba0ba05980bae0673b8195908d21204e",
},
"io_bazel_rules_scala_org_ows2_asm_asm": {
"artifact": "org.ow2.asm:asm:6.1.1",
"sha256": "dd3b546415dd4bade2ebe3b47c7828ab0623ee2336604068e2d81023f9f8d833",
},
"io_bazel_rules_scala_net_sf_jopt_simple_jopt_simple": {
"artifact": "net.sf.jopt-simple:jopt-simple:4.6",
"sha256": "3fcfbe3203c2ea521bf7640484fd35d6303186ea2e08e72f032d640ca067ffda",
},
"io_bazel_rules_scala_org_apache_commons_commons_math3": {
"artifact": "org.apache.commons:commons-math3:3.6.1",
"sha256": "1e56d7b058d28b65abd256b8458e3885b674c1d588fa43cd7d1cbb9c7ef2b308",
},
"io_bazel_rules_scala_junit_junit": {
"artifact": "junit:junit:4.12",
"sha256": "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
},
"io_bazel_rules_scala_org_hamcrest_hamcrest_core": {
"artifact": "org.hamcrest:hamcrest-core:1.3",
"sha256": "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
},
"io_bazel_rules_scala_org_specs2_specs2_common": {
"artifact": "org.specs2:specs2-common_2.11:4.4.1",
"sha256": "52d7c0da58725606e98c6e8c81d2efe632053520a25da9140116d04a4abf9d2c",
"deps": [
"@io_bazel_rules_scala_org_specs2_specs2_fp",
],
},
"io_bazel_rules_scala_org_specs2_specs2_core": {
"artifact": "org.specs2:specs2-core_2.11:4.4.1",
"sha256": "8e95cb7e347e7a87e7a80466cbd88419ece1aaacb35c32e8bd7d299a623b31b9",
"deps": [
"@io_bazel_rules_scala_org_specs2_specs2_common",
"@io_bazel_rules_scala_org_specs2_specs2_matcher",
],
},
"io_bazel_rules_scala_org_specs2_specs2_fp": {
"artifact": "org.specs2:specs2-fp_2.11:4.4.1",
"sha256": "e43006fdd0726ffcd1e04c6c4d795176f5f765cc787cc09baebe1fcb009e4462",
},
"io_bazel_rules_scala_org_specs2_specs2_matcher": {
"artifact": "org.specs2:specs2-matcher_2.11:4.4.1",
"sha256": "448e5ab89d4d650d23030fdbee66a010a07dcac5e4c3e73ef5fe39ca1aace1cd",
"deps": [
"@io_bazel_rules_scala_org_specs2_specs2_common",
],
},
"io_bazel_rules_scala_org_specs2_specs2_junit": {
"artifact": "org.specs2:specs2-junit_2.11:4.4.1",
"sha256": "a8549d52e87896624200fe35ef7b841c1c698a8fb5d97d29bf082762aea9bb72",
"deps": [
"@io_bazel_rules_scala_org_specs2_specs2_core",
],
},
"scala_proto_rules_scalapb_plugin": {
"artifact": "com.thesamet.scalapb:compilerplugin_2.11:0.9.7",
"sha256": "2d6793fa2565953ef2b5094fc37fae4933f3c42e4cb4048d54e7f358ec104a87",
},
"scala_proto_rules_protoc_bridge": {
"artifact": "com.thesamet.scalapb:protoc-bridge_2.11:0.7.14",
"sha256": "314e34bf331b10758ff7a780560c8b5a5b09e057695a643e33ab548e3d94aa03",
},
"scala_proto_rules_scalapb_runtime": {
"artifact": "com.thesamet.scalapb:scalapb-runtime_2.11:0.9.7",
"sha256": "5131033e9536727891a38004ec707a93af1166cb8283c7db711c2c105fbf289e",
},
"scala_proto_rules_scalapb_runtime_grpc": {
"artifact": "com.thesamet.scalapb:scalapb-runtime-grpc_2.11:0.9.7",
"sha256": "24d19df500ce6450d8f7aa72a9bad675fa4f3650f7736d548aa714058f887e23",
},
"scala_proto_rules_scalapb_lenses": {
"artifact": "com.thesamet.scalapb:lenses_2.11:0.9.7",
"sha256": "f8e3b526ceac998652b296014e9ab4c0ab906a40837dd1dfcf6948b6f5a1a8bf",
},
"scala_proto_rules_scalapb_fastparse": {
"artifact": "com.lihaoyi:fastparse_2.11:2.1.2",
"sha256": "5c5d81f90ada03ac5b21b161864a52558133951031ee5f6bf4d979e8baa03628",
},
"scala_proto_rules_grpc_core": {
"artifact": "io.grpc:grpc-core:1.24.0",
"sha256": "8fc900625a9330b1c155b5423844d21be0a5574fe218a63170a16796c6f7880e",
},
"scala_proto_rules_grpc_api": {
"artifact": "io.grpc:grpc-api:1.24.0",
"sha256": "553978366e04ee8ddba64afde3b3cf2ac021a2f3c2db2831b6491d742b558598",
},
"scala_proto_rules_grpc_stub": {
"artifact": "io.grpc:grpc-stub:1.24.0",
"sha256": "eaa9201896a77a0822e26621b538c7154f00441a51c9b14dc9e1ec1f2acfb815",
},
"scala_proto_rules_grpc_protobuf": {
"artifact": "io.grpc:grpc-protobuf:1.24.0",
"sha256": "88cd0838ea32893d92cb214ea58908351854ed8de7730be07d5f7d19025dd0bc",
},
"scala_proto_rules_grpc_netty": {
"artifact": "io.grpc:grpc-netty:1.24.0",
"sha256": "8478333706ba442a354c2ddb8832d80a5aef71016e8a9cf07e7bf6e8c298f042",
},
"scala_proto_rules_grpc_context": {
"artifact": "io.grpc:grpc-context:1.24.0",
"sha256": "1f0546e18789f7445d1c5a157010a11bc038bbb31544cdb60d9da3848efcfeea",
},
"scala_proto_rules_perfmark_api": {
"artifact": "io.perfmark:perfmark-api:0.17.0",
"sha256": "816c11409b8a0c6c9ce1cda14bed526e7b4da0e772da67c5b7b88eefd41520f9",
},
"scala_proto_rules_guava": {
"artifact": "com.google.guava:guava:26.0-android",
"sha256": "1d044ebb866ef08b7d04e998b4260c9b52fab6e6d6b68d207859486bb3686cd5",
},
"scala_proto_rules_google_instrumentation": {
"artifact": "com.google.instrumentation:instrumentation-api:0.3.0",
"sha256": "671f7147487877f606af2c7e39399c8d178c492982827305d3b1c7f5b04f1145",
},
"scala_proto_rules_netty_codec": {
"artifact": "io.netty:netty-codec:4.1.32.Final",
"sha256": "dbd6cea7d7bf5a2604e87337cb67c9468730d599be56511ed0979aacb309f879",
},
"scala_proto_rules_netty_codec_http": {
"artifact": "io.netty:netty-codec-http:4.1.32.Final",
"sha256": "db2c22744f6a4950d1817e4e1a26692e53052c5d54abe6cceecd7df33f4eaac3",
},
"scala_proto_rules_netty_codec_socks": {
"artifact": "io.netty:netty-codec-socks:4.1.32.Final",
"sha256": "fe2f2e97d6c65dc280623dcfd24337d8a5c7377049c120842f2c59fb83d7408a",
},
"scala_proto_rules_netty_codec_http2": {
"artifact": "io.netty:netty-codec-http2:4.1.32.Final",
"sha256": "4d4c6cfc1f19efb969b9b0ae6cc977462d202867f7dcfee6e9069977e623a2f5",
},
"scala_proto_rules_netty_handler": {
"artifact": "io.netty:netty-handler:4.1.32.Final",
"sha256": "07d9756e48b5f6edc756e33e8b848fb27ff0b1ae087dab5addca6c6bf17cac2d",
},
"scala_proto_rules_netty_buffer": {
"artifact": "io.netty:netty-buffer:4.1.32.Final",
"sha256": "8ac0e30048636bd79ae205c4f9f5d7544290abd3a7ed39d8b6d97dfe3795afc1",
},
"scala_proto_rules_netty_transport": {
"artifact": "io.netty:netty-transport:4.1.32.Final",
"sha256": "175bae0d227d7932c0c965c983efbb3cf01f39abe934f5c4071d0319784715fb",
},
"scala_proto_rules_netty_resolver": {
"artifact": "io.netty:netty-resolver:4.1.32.Final",
"sha256": "9b4a19982047a95ea4791a7ad7ad385c7a08c2ac75f0a3509cc213cb32a726ae",
},
"scala_proto_rules_netty_common": {
"artifact": "io.netty:netty-common:4.1.32.Final",
"sha256": "cc993e660f8f8e3b033f1d25a9e2f70151666bdf878d460a6508cb23daa696dc",
},
"scala_proto_rules_netty_handler_proxy": {
"artifact": "io.netty:netty-handler-proxy:4.1.32.Final",
"sha256": "10d1081ed114bb0e76ebbb5331b66a6c3189cbdefdba232733fc9ca308a6ea34",
},
"scala_proto_rules_opencensus_api": {
"artifact": "io.opencensus:opencensus-api:0.22.1",
"sha256": "62a0503ee81856ba66e3cde65dee3132facb723a4fa5191609c84ce4cad36127",
},
"scala_proto_rules_opencensus_impl": {
"artifact": "io.opencensus:opencensus-impl:0.22.1",
"sha256": "9e8b209da08d1f5db2b355e781b9b969b2e0dab934cc806e33f1ab3baed4f25a",
},
"scala_proto_rules_disruptor": {
"artifact": "com.lmax:disruptor:3.4.2",
"sha256": "f412ecbb235c2460b45e63584109723dea8d94b819c78c9bfc38f50cba8546c0",
},
"scala_proto_rules_opencensus_impl_core": {
"artifact": "io.opencensus:opencensus-impl-core:0.22.1",
"sha256": "04607d100e34bacdb38f93c571c5b7c642a1a6d873191e25d49899668514db68",
},
"scala_proto_rules_opencensus_contrib_grpc_metrics": {
"artifact": "io.opencensus:opencensus-contrib-grpc-metrics:0.22.1",
"sha256": "3f6f4d5bd332c516282583a01a7c940702608a49ed6e62eb87ef3b1d320d144b",
},
"io_bazel_rules_scala_org_tpolecat_tut_core": {
"artifact": "org.tpolecat:tut-core_2.11:0.6.13",
"sha256": "7f89f9858713e9089d47389e66a2184303bf3582719e4ab313d83dccd5ed2fe9",
},
"io_bazel_rules_scala_mustache": {
"artifact": "com.github.spullara.mustache.java:compiler:0.8.18",
"sha256": "ddabc1ef897fd72319a761d29525fd61be57dc25d04d825f863f83cc89000e66",
},
"io_bazel_rules_scala_guava": {
"artifact": "com.google.guava:guava:21.0",
"sha256": "972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480",
},
"libthrift": {
"artifact": "org.apache.thrift:libthrift:0.10.0",
"sha256": "8591718c1884ac8001b4c5ca80f349c0a6deec691de0af720c5e3bc3a581dada",
},
"io_bazel_rules_scala_scrooge_core": {
"artifact": "com.twitter:scrooge-core_2.11:21.2.0",
"sha256": "d6cef1408e34b9989ea8bc4c567dac922db6248baffe2eeaa618a5b354edd2bb",
},
"io_bazel_rules_scala_scrooge_generator": {
"artifact": "com.twitter:scrooge-generator_2.11:21.2.0",
"sha256": "87094f01df2c0670063ab6ebe156bb1a1bcdabeb95bc45552660b030287d6acb",
"runtime_deps": [
"@io_bazel_rules_scala_guava",
"@io_bazel_rules_scala_mustache",
],
},
"io_bazel_rules_scala_util_core": {
"artifact": "com.twitter:util-core_2.11:21.2.0",
"sha256": "31c33d494ca5a877c1e5b5c1f569341e1d36e7b2c8b3fb0356fb2b6d4a3907ca",
},
"io_bazel_rules_scala_util_logging": {
"artifact": "com.twitter:util-logging_2.11:21.2.0",
"sha256": "f3b62465963fbf0fe9860036e6255337996bb48a1a3f21a29503a2750d34f319",
},
"io_bazel_rules_scala_javax_annotation_api": {
"artifact": "javax.annotation:javax.annotation-api:1.3.2",
"sha256": "e04ba5195bcd555dc95650f7cc614d151e4bcd52d29a10b8aa2197f3ab89ab9b",
},
"com_twitter__scalding_date": {
"testonly": True,
"artifact": "com.twitter:scalding-date_2.11:0.17.0",
"sha256": "bf743cd6d224a4568d6486a2b794143e23145d2afd7a1d2de412d49e45bdb308",
},
"org_typelevel__cats_core": {
"testonly": True,
"artifact": "org.typelevel:cats-core_2.11:0.9.0",
"sha256": "3fda7a27114b0d178107ace5c2cf04e91e9951810690421768e65038999ffca5",
},
"com_google_guava_guava_21_0_with_file": {
"testonly": True,
"artifact": "com.google.guava:guava:21.0",
"sha256": "972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480",
},
"com_github_jnr_jffi_native": {
"testonly": True,
"artifact": "com.github.jnr:jffi:jar:native:1.2.17",
"sha256": "4eb582bc99d96c8df92fc6f0f608fd123d278223982555ba16219bf8be9f75a9",
},
"org_apache_commons_commons_lang_3_5": {
"testonly": True,
"artifact": "org.apache.commons:commons-lang3:3.5",
"sha256": "8ac96fc686512d777fca85e144f196cd7cfe0c0aec23127229497d1a38ff651c",
},
"org_springframework_spring_core": {
"testonly": True,
"artifact": "org.springframework:spring-core:5.1.5.RELEASE",
"sha256": "f771b605019eb9d2cf8f60c25c050233e39487ff54d74c93d687ea8de8b7285a",
},
"org_springframework_spring_tx": {
"testonly": True,
"artifact": "org.springframework:spring-tx:5.1.5.RELEASE",
"sha256": "666f72b73c7e6b34e5bb92a0d77a14cdeef491c00fcb07a1e89eb62b08500135",
"deps": [
"@org_springframework_spring_core",
],
},
"com_google_guava_guava_21_0": {
"testonly": True,
"artifact": "com.google.guava:guava:21.0",
"sha256": "972139718abc8a4893fa78cba8cf7b2c903f35c97aaf44fa3031b0669948b480",
"deps": [
"@org_springframework_spring_core",
],
},
"org_spire_math_kind_projector": {
"testonly": True,
"artifact": "org.spire-math:kind-projector_2.11:0.9.10",
"sha256": "897460d4488b7dd6ac9198937d6417b36cc6ec8ab3693fdf2c532652f26c4373",
},
}
| true | true |
f728494cc3ef6bb334cd5035b7f7227ccaee9864 | 1,098 | py | Python | tests/__init__.py | orest-d/libhxl-python | fd4523bdef7c0c0333bd0b287d7eed70e18e48e1 | [
"Unlicense"
] | null | null | null | tests/__init__.py | orest-d/libhxl-python | fd4523bdef7c0c0333bd0b287d7eed70e18e48e1 | [
"Unlicense"
] | null | null | null | tests/__init__.py | orest-d/libhxl-python | fd4523bdef7c0c0333bd0b287d7eed70e18e48e1 | [
"Unlicense"
] | null | null | null | import logging
import os
import re
import unittest.mock
# Default to turning off all but critical logging messages
logging.basicConfig(level=logging.CRITICAL)
def mock_open_url(url, allow_local=False, timeout=None, verify_ssl=True, http_headers=None):
    """Serve local fixture files in place of remote URLs.

    Anything that does not look like an http(s) URL is treated as a local
    file path and opened directly; URLs are redirected to the bundled
    fixture under ./files/mock/ that shares the URL's basename.  Returns
    the 4-tuple shape the mocked opener is expected to produce.

    Intended as a side effect for unittest.mock.Mock.
    """
    if not re.match(r'https?:', url):
        # Plain file path: open it as-is.
        return (open(url, 'rb'), None, None, None)
    # URL: strip everything up to the last '/' to get the fixture basename.
    basename = re.sub(r'^.*/([^/]+)$', '\\1', url)
    fixture_path = resolve_path('files/mock/' + basename)
    return (open(fixture_path, 'rb'), None, None, None)
def resolve_path(filename):
    """Return the path of a test input file relative to this package directory."""
    package_dir = os.path.dirname(__file__)
    return os.path.join(package_dir, filename)
# Target function to replace for mocking URL access.
URL_MOCK_TARGET = 'hxl.io.open_url_or_file'
# Mock object to replace hxl.io.make_stream.
# Its side effect redirects every "fetch" to a local fixture file; presumably
# installed via unittest.mock.patch(URL_MOCK_TARGET, URL_MOCK_OBJECT) in the
# individual test modules -- confirm against the callers.
URL_MOCK_OBJECT = unittest.mock.Mock()
URL_MOCK_OBJECT.side_effect = mock_open_url
| 31.371429 | 92 | 0.691257 | import logging
import os
import re
import unittest.mock
logging.basicConfig(level=logging.CRITICAL)
def mock_open_url(url, allow_local=False, timeout=None, verify_ssl=True, http_headers=None):
    """Open local files instead of URLs.

    If it's a local file path, leave it alone; otherwise open the file
    under ./files/mock/ that shares the URL's basename.  Meant as a side
    effect for unittest.mock.Mock; returns the opener's 4-tuple shape.
    """
    if re.match(r'https?:', url):
        # Looks like a URL: extract the basename and map it to a fixture.
        filename = re.sub(r'^.*/([^/]+)$', '\\1', url)
        path = resolve_path('files/mock/' + filename)
    else:
        # Assume it's a local file path.
        path = url
    return (open(path, 'rb'), None, None, None)
def resolve_path(filename):
    """Resolve a pathname for a test input file relative to this package."""
    return os.path.join(os.path.dirname(__file__), filename)
# Target function to replace for mocking URL access.
URL_MOCK_TARGET = 'hxl.io.open_url_or_file'
# Mock object to replace hxl.io.make_stream.
# Its side effect redirects every "fetch" to a local fixture file; presumably
# installed via unittest.mock.patch(URL_MOCK_TARGET, URL_MOCK_OBJECT) in the
# individual test modules -- confirm against the callers.
URL_MOCK_OBJECT = unittest.mock.Mock()
URL_MOCK_OBJECT.side_effect = mock_open_url
| true | true |
f7284a0fc1576b461e592f8ca835c0f4597e80a3 | 9,041 | py | Python | methylcapsnet/.ipynb_checkpoints/methylcaps_model_-checkpoint.py | Christensen-Lab-Dartmouth/MethylCapsNet | 17b6b19809c5e1984de804eb34cc7494210f91a6 | [
"MIT"
] | 3 | 2020-10-22T18:53:33.000Z | 2022-03-19T16:27:41.000Z | methylcapsnet/.ipynb_checkpoints/methylcaps_model_-checkpoint.py | Christensen-Lab-Dartmouth/MethylCapsNet | 17b6b19809c5e1984de804eb34cc7494210f91a6 | [
"MIT"
] | 3 | 2020-10-01T04:56:09.000Z | 2020-10-01T04:56:46.000Z | methylcapsnet/.ipynb_checkpoints/methylcaps_model_-checkpoint.py | Christensen-Lab-Dartmouth/MethylCapsNet | 17b6b19809c5e1984de804eb34cc7494210f91a6 | [
"MIT"
] | 1 | 2020-08-31T17:07:49.000Z | 2020-08-31T17:07:49.000Z | import pandas as pd
from pymethylprocess.MethylationDataTypes import MethylationArray
from sklearn.metrics import mean_absolute_error, r2_score
import warnings
warnings.filterwarnings("ignore")
from pybedtools import BedTool
import numpy as np
from functools import reduce
from torch.utils.data import Dataset, DataLoader
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
import os
import pysnooper
import argparse
import pickle
from sklearn.metrics import classification_report
import click
import methylcapsnet
from methylcapsnet.build_capsules import *
from methylcapsnet.methylcaps_data_models import *
import sqlite3
import os
import glob
import dask
from dask.diagnostics import ProgressBar
from pathos.multiprocessing import Pool
import multiprocessing
import dask.bag as db
from distributed import Client, LocalCluster, get_task_stream
# Fix all random seeds and force deterministic cuDNN kernel selection so
# that training runs are reproducible across invocations (at the cost of
# the autotuner's potential speedups, since benchmark mode is disabled).
RANDOM_SEED=42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@pysnooper.snoop('train.log')
def model_capsnet_(train_methyl_array='train_val_test_sets/train_methyl_array.pkl',
                   val_methyl_array='train_val_test_sets/val_methyl_array.pkl',
                   interest_col='disease',
                   n_epochs=10,
                   n_bins=0,
                   bin_len=1000000,
                   min_capsule_len=300,
                   primary_caps_out_len=45,
                   caps_out_len=45,
                   hidden_topology='30,80,50',
                   gamma=1e-2,
                   decoder_topology='100,300',
                   learning_rate=1e-2,
                   routing_iterations=3,
                   overlap=0.,
                   custom_loss='none',
                   gamma2=1e-2,
                   job=0,
                   capsule_choice=('genomic_binned',),  # immutable default (was a shared list)
                   custom_capsule_file='',
                   test_methyl_array='',
                   predict=False,
                   batch_size=16,
                   limited_capsule_names_file='',
                   gsea_superset='',
                   tissue='',
                   number_sets=25,
                   use_set=False,
                   gene_context=False,
                   select_subtypes=(),  # immutable default (was a shared list)
                   fit_spw=False,
                   l1_l2='',
                   custom_capsule_file2='',
                   min_capsules=5):
    """Train or evaluate a MethylCapsNet / MethylSPWNet model on methylation data.

    Loads train/val (and, with ``predict=True``, test) MethylationArray
    pickles, builds CpG capsules, fits either a capsule network (default)
    or MethylSPWNet (``fit_spw=True``), and appends the resulting validation
    loss for ``job`` to the ``val_loss`` table of ``jobs.db``.  In predict
    mode a previously saved model is loaded and its predictions for the
    test set are pickled to ``predictions.pkl``.

    Key parameters:
        interest_col: phenotype column to predict; discretized into
            ``n_bins`` categories when ``n_bins`` > 0.
        hidden_topology / decoder_topology: comma-separated layer widths,
            e.g. '30,80,50'; an empty string yields no hidden layers.
        capsule_choice: capsule-construction strategies (any iterable).
        select_subtypes: if non-empty, restrict samples to these phenotype
            values.
        custom_capsule_file2: cache path for the constructed capsules;
            loaded when it exists, written otherwise.
        l1_l2: 'l1,l2' penalty weights, used only when ``fit_spw`` is True.
        min_capsules: minimum number of capsules required to proceed.

    Returns:
        float: the minimum validation loss, or -1 in predict mode, or -2
        when training raised an exception.
    """
    capsule_choice = list(capsule_choice)
    select_subtypes = list(select_subtypes)

    def _parse_topology(spec):
        # '30,80,50' -> [30, 80, 50]; '' -> [].  (The original tested the
        # truthiness of a filter object, which is always True; the result
        # was nevertheless [] for an empty spec, preserved here.)
        return [int(tok) for tok in spec.split(',') if tok]

    hidden_topology = _parse_topology(hidden_topology)
    decoder_topology = _parse_topology(decoder_topology)

    hidden_caps_layers = []
    include_last = False

    ma = MethylationArray.from_pickle(train_methyl_array)
    ma_v = MethylationArray.from_pickle(val_methyl_array)
    if test_methyl_array and predict:
        ma_t = MethylationArray.from_pickle(test_methyl_array)

    # Best-effort removal of samples with a missing outcome; arrays that do
    # not support the operation are used unchanged (deliberate swallow).
    try:
        ma.remove_na_samples(interest_col)
        ma_v.remove_na_samples(interest_col)
        if test_methyl_array and predict:
            ma_t.remove_na_samples(interest_col)
    except Exception:
        pass

    if select_subtypes:
        # Restrict every split to the requested phenotype values.
        print(ma.pheno[interest_col].unique())
        ma.pheno = ma.pheno.loc[ma.pheno[interest_col].isin(select_subtypes)]
        ma.beta = ma.beta.loc[ma.pheno.index]
        ma_v.pheno = ma_v.pheno.loc[ma_v.pheno[interest_col].isin(select_subtypes)]
        ma_v.beta = ma_v.beta.loc[ma_v.pheno.index]
        print(ma.pheno[interest_col].unique())
        if test_methyl_array and predict:
            ma_t.pheno = ma_t.pheno.loc[ma_t.pheno[interest_col].isin(select_subtypes)]
            ma_t.beta = ma_t.beta.loc[ma_t.pheno.index]

    if custom_capsule_file2 and os.path.exists(custom_capsule_file2):
        # Reuse previously constructed capsules from the cache file.
        capsules_dict = torch.load(custom_capsule_file2)
        final_modules = capsules_dict['final_modules']
        modulecpgs = capsules_dict['modulecpgs']
        module_names = capsules_dict['module_names']
        if min_capsule_len > 1:
            # Drop capsules that are too small and rebuild the CpG union.
            include_capsules = [len(x) > min_capsule_len for x in final_modules]
            final_modules = [final_modules[i] for i in range(len(final_modules)) if include_capsules[i]]
            module_names = [module_names[i] for i in range(len(module_names)) if include_capsules[i]]
            modulecpgs = (reduce(np.union1d, final_modules)).tolist()
    else:
        final_modules, modulecpgs, module_names = build_capsules(capsule_choice,
                                                                 overlap,
                                                                 bin_len,
                                                                 ma,
                                                                 include_last,
                                                                 min_capsule_len,
                                                                 custom_capsule_file,
                                                                 gsea_superset,
                                                                 tissue,
                                                                 gene_context,
                                                                 use_set,
                                                                 number_sets,
                                                                 limited_capsule_names_file)
        if custom_capsule_file2:
            torch.save(dict(final_modules=final_modules, modulecpgs=modulecpgs,
                            module_names=module_names), custom_capsule_file2)

    assert len(final_modules) >= min_capsules, "Below the number of allowed capsules."

    if fit_spw:
        # SPWNet consumes a flat (ordered, possibly repeated) CpG list
        # rather than the de-duplicated union.
        modulecpgs = list(reduce(lambda x, y: np.hstack((x, y)), final_modules))

    if not include_last:
        # Subset the beta matrices to the capsule CpGs.
        ma.beta = ma.beta.loc[:, modulecpgs]
        ma_v.beta = ma_v.beta.loc[:, modulecpgs]
        if test_methyl_array and predict:
            ma_t.beta = ma_t.beta.loc[:, modulecpgs]

    original_interest_col = interest_col
    if n_bins:
        # Discretize a continuous outcome; val/test reuse the training bin
        # edges so the categories are comparable across splits.
        new_interest_col = interest_col + '_binned'
        ma.pheno.loc[:, new_interest_col], bins = pd.cut(ma.pheno[interest_col], bins=n_bins, retbins=True)
        ma_v.pheno.loc[:, new_interest_col], _ = pd.cut(ma_v.pheno[interest_col], bins=bins, retbins=True)
        if test_methyl_array and predict:
            ma_t.pheno.loc[:, new_interest_col], _ = pd.cut(ma_t.pheno[interest_col], bins=bins, retbins=True)
        interest_col = new_interest_col

    datasets = dict()
    datasets['train'] = MethylationDataset(ma, interest_col, modules=final_modules, module_names=module_names,
                                           original_interest_col=original_interest_col, run_spw=fit_spw)
    print(datasets['train'].X.isnull().sum().sum())
    datasets['val'] = MethylationDataset(ma_v, interest_col, modules=final_modules, module_names=module_names,
                                         original_interest_col=original_interest_col, run_spw=fit_spw)
    if test_methyl_array and predict:
        datasets['test'] = MethylationDataset(ma_t, interest_col, modules=final_modules, module_names=module_names,
                                              original_interest_col=original_interest_col, run_spw=fit_spw)

    dataloaders = dict()
    dataloaders['train'] = DataLoader(datasets['train'], batch_size=batch_size, shuffle=True,
                                      num_workers=8, pin_memory=True, drop_last=True)
    dataloaders['val'] = DataLoader(datasets['val'], batch_size=batch_size, shuffle=False,
                                    num_workers=8, pin_memory=True, drop_last=False)
    n_primary = len(final_modules)
    if test_methyl_array and predict:
        dataloaders['test'] = DataLoader(datasets['test'], batch_size=batch_size, shuffle=False,
                                         num_workers=8, pin_memory=True, drop_last=False)

    n_inputs = list(map(len, final_modules))
    n_out_caps = len(datasets['train'].y_unique)

    if not fit_spw:
        print("Not fitting MethylSPWNet")
        primary_caps = PrimaryCaps(modules=final_modules, hidden_topology=hidden_topology,
                                   n_output=primary_caps_out_len)
        hidden_caps = []
        output_caps = CapsLayer(n_out_caps, n_primary, primary_caps_out_len, caps_out_len,
                                routing_iterations=routing_iterations)
        decoder = Decoder(n_out_caps * caps_out_len, len(list(ma.beta)), decoder_topology)
        model = CapsNet(primary_caps, hidden_caps, output_caps, decoder, gamma=gamma)
        if test_methyl_array and predict:
            model.load_state_dict(torch.load('capsnet_model.pkl'))
    else:
        print("Fitting MethylSPWNet")
        module_lens = [len(x) for x in final_modules]
        model = MethylSPWNet(module_lens, hidden_topology, dropout_p=0.2, n_output=n_out_caps)
        if test_methyl_array and predict:
            model.load_state_dict(torch.load('spwnet_model.pkl'))

    if torch.cuda.is_available():
        model = model.cuda()

    # L1/L2 penalties apply only to MethylSPWNet; always bind them so they
    # are defined (the original left them unbound when fit_spw was False).
    if fit_spw and l1_l2:
        l1, l2 = map(float, l1_l2.split(','))
    else:
        l1, l2 = 0., 0.

    trainer = Trainer(model=model,
                      validation_dataloader=dataloaders['val'],
                      n_epochs=n_epochs,
                      lr=learning_rate,
                      n_primary=n_primary,
                      custom_loss=custom_loss,
                      gamma2=gamma2,
                      spw_mode=fit_spw,
                      l1=l1 if fit_spw else 0.,
                      l2=l2 if fit_spw else 0.)

    if not predict:
        try:
            trainer.fit(dataloader=dataloaders['train'])
            val_loss = min(trainer.val_losses)
            torch.save(trainer.model.state_dict(),
                       'capsnet_model.pkl' if not fit_spw else 'spwnet_model.pkl')
            if fit_spw:
                torch.save(dict(final_modules=final_modules, modulecpgs=modulecpgs,
                                module_names=module_names), 'spwnet_capsules.pkl')
                torch.save(dict(module_names=module_names, module_lens=module_lens, dropout_p=0.2,
                                hidden_topology=hidden_topology, n_output=n_out_caps),
                           'spwnet_config.pkl')
        except Exception as e:
            # Record a sentinel loss so a hyperparameter sweep can continue
            # past a failed configuration.
            print(e)
            val_loss = -2
        with sqlite3.connect('jobs.db', check_same_thread=False) as conn:
            pd.DataFrame([job, val_loss], index=['job', 'val_loss'], columns=[0]).T.to_sql(
                'val_loss', conn, if_exists='append')
    else:
        if test_methyl_array:
            trainer.weights = 1.
            Y = trainer.predict(dataloaders['test'])
            # Close the file handle explicitly (the original leaked it).
            with open('predictions.pkl', 'wb') as handle:
                pickle.dump(Y, handle)
        val_loss = -1

    return val_loss
| 34.907336 | 167 | 0.759982 | import pandas as pd
from pymethylprocess.MethylationDataTypes import MethylationArray
from sklearn.metrics import mean_absolute_error, r2_score
import warnings
warnings.filterwarnings("ignore")
from pybedtools import BedTool
import numpy as np
from functools import reduce
from torch.utils.data import Dataset, DataLoader
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
import os
import pysnooper
import argparse
import pickle
from sklearn.metrics import classification_report
import click
import methylcapsnet
from methylcapsnet.build_capsules import *
from methylcapsnet.methylcaps_data_models import *
import sqlite3
import os
import glob
import dask
from dask.diagnostics import ProgressBar
from pathos.multiprocessing import Pool
import multiprocessing
import dask.bag as db
from distributed import Client, LocalCluster, get_task_stream
RANDOM_SEED=42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@pysnooper.snoop('train.log')
def model_capsnet_(train_methyl_array='train_val_test_sets/train_methyl_array.pkl',
val_methyl_array='train_val_test_sets/val_methyl_array.pkl',
interest_col='disease',
n_epochs=10,
n_bins=0,
bin_len=1000000,
min_capsule_len=300,
primary_caps_out_len=45,
caps_out_len=45,
hidden_topology='30,80,50',
gamma=1e-2,
decoder_topology='100,300',
learning_rate=1e-2,
routing_iterations=3,
overlap=0.,
custom_loss='none',
gamma2=1e-2,
job=0,
capsule_choice=['genomic_binned'],
custom_capsule_file='',
test_methyl_array='',
predict=False,
batch_size=16,
limited_capsule_names_file='',
gsea_superset='',
tissue='',
number_sets=25,
use_set=False,
gene_context=False,
select_subtypes=[],
fit_spw=False,
l1_l2='',
custom_capsule_file2='',
min_capsules=5):
capsule_choice=list(capsule_choice)
hlt_list=filter(None,hidden_topology.split(','))
if hlt_list:
hidden_topology=list(map(int,hlt_list))
else:
hidden_topology=[]
hlt_list=filter(None,decoder_topology.split(','))
if hlt_list:
decoder_topology=list(map(int,hlt_list))
else:
decoder_topology=[]
hidden_caps_layers=[]
include_last=False
ma=MethylationArray.from_pickle(train_methyl_array)
ma_v=MethylationArray.from_pickle(val_methyl_array)
if test_methyl_array and predict:
ma_t=MethylationArray.from_pickle(test_methyl_array)
try:
ma.remove_na_samples(interest_col)
ma_v.remove_na_samples(interest_col)
if test_methyl_array and predict:
ma_t.remove_na_samples(interest_col)
except:
pass
if select_subtypes:
print(ma.pheno[interest_col].unique())
ma.pheno=ma.pheno.loc[ma.pheno[interest_col].isin(select_subtypes)]
ma.beta=ma.beta.loc[ma.pheno.index]
ma_v.pheno=ma_v.pheno.loc[ma_v.pheno[interest_col].isin(select_subtypes)]
ma_v.beta=ma_v.beta.loc[ma_v.pheno.index]
print(ma.pheno[interest_col].unique())
if test_methyl_array and predict:
ma_t.pheno=ma_t.pheno.loc[ma_t.pheno[interest_col].isin(select_subtypes)]
ma_t.beta=ma_t.beta.loc[ma_t.pheno.index]
if custom_capsule_file2 and os.path.exists(custom_capsule_file2):
capsules_dict=torch.load(custom_capsule_file2)
final_modules, modulecpgs, module_names=capsules_dict['final_modules'], capsules_dict['modulecpgs'], capsules_dict['module_names']
if min_capsule_len>1:
include_capsules=[len(x)>min_capsule_len for x in final_modules]
final_modules=[final_modules[i] for i in range(len(final_modules)) if include_capsules[i]]
module_names=[module_names[i] for i in range(len(module_names)) if include_capsules[i]]
modulecpgs=(reduce(np.union1d,final_modules)).tolist()
else:
final_modules, modulecpgs, module_names=build_capsules(capsule_choice,
overlap,
bin_len,
ma,
include_last,
min_capsule_len,
custom_capsule_file,
gsea_superset,
tissue,
gene_context,
use_set,
number_sets,
limited_capsule_names_file)
if custom_capsule_file2:
torch.save(dict(final_modules=final_modules, modulecpgs=modulecpgs, module_names=module_names),custom_capsule_file2)
assert len(final_modules) >= min_capsules , "Below the number of allowed capsules."
if fit_spw:
modulecpgs=list(reduce(lambda x,y:np.hstack((x,y)),final_modules))
if not include_last:
ma.beta=ma.beta.loc[:,modulecpgs]
ma_v.beta=ma_v.beta.loc[:,modulecpgs]
if test_methyl_array and predict:
ma_t.beta=ma_t.beta.loc[:,modulecpgs]
original_interest_col=interest_col
if n_bins:
new_interest_col=interest_col+'_binned'
ma.pheno.loc[:,new_interest_col],bins=pd.cut(ma.pheno[interest_col],bins=n_bins,retbins=True)
ma_v.pheno.loc[:,new_interest_col],_=pd.cut(ma_v.pheno[interest_col],bins=bins,retbins=True)
if test_methyl_array and predict:
ma_t.pheno.loc[:,new_interest_col],_=pd.cut(ma_t.pheno[interest_col],bins=bins,retbins=True)
interest_col=new_interest_col
datasets=dict()
datasets['train']=MethylationDataset(ma,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)
print(datasets['train'].X.isnull().sum().sum())
datasets['val']=MethylationDataset(ma_v,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)
if test_methyl_array and predict:
datasets['test']=MethylationDataset(ma_t,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)
dataloaders=dict()
dataloaders['train']=DataLoader(datasets['train'],batch_size=batch_size,shuffle=True,num_workers=8, pin_memory=True, drop_last=True)
dataloaders['val']=DataLoader(datasets['val'],batch_size=batch_size,shuffle=False,num_workers=8, pin_memory=True, drop_last=False)
n_primary=len(final_modules)
if test_methyl_array and predict:
dataloaders['test']=DataLoader(datasets['test'],batch_size=batch_size,shuffle=False,num_workers=8, pin_memory=True, drop_last=False)
n_inputs=list(map(len,final_modules))
n_out_caps=len(datasets['train'].y_unique)
if not fit_spw:
print("Not fitting MethylSPWNet")
primary_caps = PrimaryCaps(modules=final_modules,hidden_topology=hidden_topology,n_output=primary_caps_out_len)
hidden_caps = []
output_caps = CapsLayer(n_out_caps,n_primary,primary_caps_out_len,caps_out_len,routing_iterations=routing_iterations)
decoder = Decoder(n_out_caps*caps_out_len,len(list(ma.beta)),decoder_topology)
model = CapsNet(primary_caps, hidden_caps, output_caps, decoder, gamma=gamma)
if test_methyl_array and predict:
model.load_state_dict(torch.load('capsnet_model.pkl'))
else:
print("Fitting MethylSPWNet")
module_lens=[len(x) for x in final_modules]
model=MethylSPWNet(module_lens, hidden_topology, dropout_p=0.2, n_output=n_out_caps)
if test_methyl_array and predict:
model.load_state_dict(torch.load('spwnet_model.pkl'))
if torch.cuda.is_available():
model=model.cuda()
if l1_l2 and fit_spw:
l1,l2=list(map(float,l1_l2.split(',')))
elif fit_spw:
l1,l2=0.,0.
trainer=Trainer(model=model,
validation_dataloader=dataloaders['val'],
n_epochs=n_epochs,
lr=learning_rate,
n_primary=n_primary,
custom_loss=custom_loss,
gamma2=gamma2,
spw_mode=fit_spw,
l1=l1 if fit_spw else 0.,
l2=l2 if fit_spw else 0.)
if not predict:
try:
trainer.fit(dataloader=dataloaders['train'])
val_loss=min(trainer.val_losses)
torch.save(trainer.model.state_dict(),'capsnet_model.pkl' if not fit_spw else 'spwnet_model.pkl')
if fit_spw:
torch.save(dict(final_modules=final_modules, modulecpgs=modulecpgs, module_names=module_names), 'spwnet_capsules.pkl')
torch.save(dict(module_names=module_names,module_lens=module_lens,dropout_p=0.2,hidden_topology=hidden_topology,n_output=n_out_caps),'spwnet_config.pkl')
except Exception as e:
print(e)
val_loss=-2
with sqlite3.connect('jobs.db', check_same_thread=False) as conn:
pd.DataFrame([job,val_loss],index=['job','val_loss'],columns=[0]).T.to_sql('val_loss',conn,if_exists='append')
else:
if test_methyl_array:
trainer.weights=1.
Y=trainer.predict(dataloaders['test'])
pickle.dump(Y,open('predictions.pkl','wb'))
val_loss=-1
return val_loss
| true | true |
f7284b98b0f6a7bac4cce914ee74071f35acb4ad | 223 | py | Python | base/utils/auto_str_decorator.py | PeterStuck/teacher-app | e71c5b69019450a9ac8694fb461d343ce33e1b35 | [
"CC0-1.0"
] | null | null | null | base/utils/auto_str_decorator.py | PeterStuck/teacher-app | e71c5b69019450a9ac8694fb461d343ce33e1b35 | [
"CC0-1.0"
] | null | null | null | base/utils/auto_str_decorator.py | PeterStuck/teacher-app | e71c5b69019450a9ac8694fb461d343ce33e1b35 | [
"CC0-1.0"
] | null | null | null | def auto_str(cls):
def __str__(self):
return '%s(%s)' % (
type(self).__name__,
', '.join('%s=%s' % item for item in vars(self).items())
)
cls.__str__ = __str__
return cls | 24.777778 | 68 | 0.493274 | def auto_str(cls):
def __str__(self):
return '%s(%s)' % (
type(self).__name__,
', '.join('%s=%s' % item for item in vars(self).items())
)
cls.__str__ = __str__
return cls | true | true |
f7284c3b05332673bf46f3a853c15db048e94b4f | 1,589 | py | Python | webu/middleware/formatting.py | happyuc-project/webu.py | 5a01124fc84d74df09a33d9dabe88b704cd5b6c6 | [
"MIT"
] | null | null | null | webu/middleware/formatting.py | happyuc-project/webu.py | 5a01124fc84d74df09a33d9dabe88b704cd5b6c6 | [
"MIT"
] | null | null | null | webu/middleware/formatting.py | happyuc-project/webu.py | 5a01124fc84d74df09a33d9dabe88b704cd5b6c6 | [
"MIT"
] | null | null | null | from cytoolz.dicttoolz import (
assoc,
)
def construct_formatting_middleware(request_formatters=None,
result_formatters=None,
error_formatters=None):
if request_formatters is None:
request_formatters = {}
if result_formatters is None:
result_formatters = {}
if error_formatters is None:
error_formatters = {}
def formatter_middleware(make_request, webu):
def middleware(method, params):
if method in request_formatters:
formatter = request_formatters[method]
formatted_params = formatter(params)
response = make_request(method, formatted_params)
else:
response = make_request(method, params)
if 'result' in response and method in result_formatters:
formatter = result_formatters[method]
formatted_response = assoc(
response,
'result',
formatter(response['result']),
)
return formatted_response
elif 'error' in response and method in error_formatters:
formatter = error_formatters[method]
formatted_response = assoc(
response,
'error',
formatter(response['error']),
)
return formatted_response
else:
return response
return middleware
return formatter_middleware
| 35.311111 | 68 | 0.546885 | from cytoolz.dicttoolz import (
assoc,
)
def construct_formatting_middleware(request_formatters=None,
result_formatters=None,
error_formatters=None):
if request_formatters is None:
request_formatters = {}
if result_formatters is None:
result_formatters = {}
if error_formatters is None:
error_formatters = {}
def formatter_middleware(make_request, webu):
def middleware(method, params):
if method in request_formatters:
formatter = request_formatters[method]
formatted_params = formatter(params)
response = make_request(method, formatted_params)
else:
response = make_request(method, params)
if 'result' in response and method in result_formatters:
formatter = result_formatters[method]
formatted_response = assoc(
response,
'result',
formatter(response['result']),
)
return formatted_response
elif 'error' in response and method in error_formatters:
formatter = error_formatters[method]
formatted_response = assoc(
response,
'error',
formatter(response['error']),
)
return formatted_response
else:
return response
return middleware
return formatter_middleware
| true | true |
f7284d34e7332b7266ecb6fcdfa787da52f0bba3 | 2,050 | py | Python | api/control/getitem.py | savazeb/cosmos-ai | 4606e959396ebedca73086601078aa9c0ed77b31 | [
"MIT"
] | null | null | null | api/control/getitem.py | savazeb/cosmos-ai | 4606e959396ebedca73086601078aa9c0ed77b31 | [
"MIT"
] | null | null | null | api/control/getitem.py | savazeb/cosmos-ai | 4606e959396ebedca73086601078aa9c0ed77b31 | [
"MIT"
] | null | null | null | import json
import numpy as np
class stride():
def __init__(self, size = 1):
self.size = size
self.list = self.init_list()
def init_list(self):
return []
def add(self, value):
self.list.append(value)
if len(self.list) > self.size:
self.list = self.list[1:self.size+1]
directions = [
"not found", # 0b0000
"left", # 0b0001
"left back", # 0b0010
"left back", # 0b0011
"right back", # 0b0100
"undefined", # 0b0101
"back", # 0b0110
"left back", # 0b0111
"right", # 0b1000
"undefined", # 0b1001
"undefined", # 0b1010
"undefined", # 0b1011
"right back", # 0b1100
"undefined", # 0b1101
"right back", # 0b1110
"undefined", # 0b1111
None
]
def most_frequent(List):
return max(set(List), key = List.count)
ir_s = stride()
def getDirection(ir, stride_length):
ir_s.size = stride_length
direction = int.from_bytes(ir[0], 'little') & 0xf if ir else 16
ir_s.add(direction)
print(ir_s.list)
#print("[api] dir list", ir_s.list)
return directions[most_frequent(ir_s.list)]
def find(List):
if sum(x is not None for x in List) >= int(len(List)/2):
return max(index for index, item in enumerate(List) if item)
return max(index for index, item in enumerate(List) if not item)
cam_s = stride()
OBJ_BUFF = None, [None,None]
def getDetectedObject(cam, stride_length):
cam_s.size = stride_length
if cam:
obj = json.loads(cam[0].decode())
cam_s.add(list((obj["confidence"], obj["center"])))
else:
cam_s.add(list(OBJ_BUFF))
# print('[api] obj list', cam_s.list)
return cam_s.list[find(cam_s.list)]
def getPoint(lidar):
angles = []
ranges = []
if lidar:
point = lidar[0].decode()
point = json.loads(point)
for key, val in point.items():
angles.append(int(key))
ranges.append(float(val))
return np.array([angles, ranges])
| 26.623377 | 68 | 0.582927 | import json
import numpy as np
class stride():
def __init__(self, size = 1):
self.size = size
self.list = self.init_list()
def init_list(self):
return []
def add(self, value):
self.list.append(value)
if len(self.list) > self.size:
self.list = self.list[1:self.size+1]
directions = [
"not found",
"left",
"left back",
"left back",
"right back",
"undefined",
"back",
"left back",
"right",
"undefined",
"undefined",
"undefined",
"right back",
"undefined",
"right back",
"undefined",
None
]
def most_frequent(List):
return max(set(List), key = List.count)
ir_s = stride()
def getDirection(ir, stride_length):
ir_s.size = stride_length
direction = int.from_bytes(ir[0], 'little') & 0xf if ir else 16
ir_s.add(direction)
print(ir_s.list)
return directions[most_frequent(ir_s.list)]
def find(List):
if sum(x is not None for x in List) >= int(len(List)/2):
return max(index for index, item in enumerate(List) if item)
return max(index for index, item in enumerate(List) if not item)
cam_s = stride()
OBJ_BUFF = None, [None,None]
def getDetectedObject(cam, stride_length):
cam_s.size = stride_length
if cam:
obj = json.loads(cam[0].decode())
cam_s.add(list((obj["confidence"], obj["center"])))
else:
cam_s.add(list(OBJ_BUFF))
return cam_s.list[find(cam_s.list)]
def getPoint(lidar):
angles = []
ranges = []
if lidar:
point = lidar[0].decode()
point = json.loads(point)
for key, val in point.items():
angles.append(int(key))
ranges.append(float(val))
return np.array([angles, ranges])
| true | true |
f7284ee7f93f92b8e64bb9baabdaaf8617ec383d | 3,401 | py | Python | src/dispatch/models.py | vj-codes/dispatch | f9354781956380cac290be02fb987eb50ddc1a5d | [
"Apache-2.0"
] | null | null | null | src/dispatch/models.py | vj-codes/dispatch | f9354781956380cac290be02fb987eb50ddc1a5d | [
"Apache-2.0"
] | null | null | null | src/dispatch/models.py | vj-codes/dispatch | f9354781956380cac290be02fb987eb50ddc1a5d | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from typing import List, Optional
import validators
from pydantic import BaseModel, validator
from sqlalchemy import Boolean, Column, DateTime, Integer, String, event, ForeignKey
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
# SQLAlchemy models...
class ProjectMixin(object):
""" Project mixin"""
@declared_attr
def project_id(cls): # noqa
return Column(Integer, ForeignKey("project.id", ondelete="CASCADE"))
@declared_attr
def project(cls): # noqa
return relationship("Project")
class TimeStampMixin(object):
""" Timestamping mixin"""
created_at = Column(DateTime, default=datetime.utcnow)
created_at._creation_order = 9998
updated_at = Column(DateTime, default=datetime.utcnow)
updated_at._creation_order = 9998
@staticmethod
def _updated_at(mapper, connection, target):
target.updated_at = datetime.utcnow()
@classmethod
def __declare_last__(cls):
event.listen(cls, "before_update", cls._updated_at)
class ContactMixin(TimeStampMixin):
""" Contact mixin"""
is_active = Column(Boolean, default=True)
is_external = Column(Boolean, default=False)
contact_type = Column(String)
email = Column(String)
company = Column(String)
notes = Column(String)
owner = Column(String)
class ResourceMixin(TimeStampMixin):
"""Resource mixin."""
resource_type = Column(String)
resource_id = Column(String)
weblink = Column(String)
# Pydantic models...
class DispatchBase(BaseModel):
class Config:
orm_mode = True
validate_assignment = True
arbitrary_types_allowed = True
class ResourceBase(DispatchBase):
resource_type: Optional[str] = None
resource_id: Optional[str] = None
weblink: Optional[str] = None
@validator("weblink")
def sanitize_weblink(cls, v):
if validators.url(v):
return v
raise ValueError("Weblink must be a valid url.")
class ContactBase(DispatchBase):
email: str
name: Optional[str] = None
is_active: Optional[bool] = True
is_external: Optional[bool] = False
company: Optional[str] = None
contact_type: Optional[str] = None
notes: Optional[str] = None
owner: Optional[str] = None
class PluginOptionModel(DispatchBase):
pass
# self referential models
class TermNested(DispatchBase):
id: Optional[int]
text: str
# disabling this for now as recursive models break swagger api gen
# definitions: Optional[List["DefinitionNested"]] = []
class DefinitionNested(DispatchBase):
id: Optional[int]
text: str
terms: Optional[List["TermNested"]] = []
class ServiceNested(DispatchBase):
pass
class IndividualNested(DispatchBase):
pass
class TeamNested(DispatchBase):
pass
class TermReadNested(DispatchBase):
id: int
text: str
class DefinitionReadNested(DispatchBase):
id: int
text: str
class ServiceReadNested(DispatchBase):
name: Optional[str] = None
external_id: Optional[str] = None
is_active: Optional[bool] = None
type: Optional[str] = None
class IndividualReadNested(ContactBase):
id: Optional[int]
title: Optional[str] = None
external_id: Optional[str]
weblink: Optional[str]
title: Optional[str]
class TeamReadNested(ContactBase):
pass
| 22.825503 | 84 | 0.699206 | from datetime import datetime
from typing import List, Optional
import validators
from pydantic import BaseModel, validator
from sqlalchemy import Boolean, Column, DateTime, Integer, String, event, ForeignKey
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
class ProjectMixin(object):
@declared_attr
def project_id(cls):
return Column(Integer, ForeignKey("project.id", ondelete="CASCADE"))
@declared_attr
def project(cls):
return relationship("Project")
class TimeStampMixin(object):
created_at = Column(DateTime, default=datetime.utcnow)
created_at._creation_order = 9998
updated_at = Column(DateTime, default=datetime.utcnow)
updated_at._creation_order = 9998
@staticmethod
def _updated_at(mapper, connection, target):
target.updated_at = datetime.utcnow()
@classmethod
def __declare_last__(cls):
event.listen(cls, "before_update", cls._updated_at)
class ContactMixin(TimeStampMixin):
is_active = Column(Boolean, default=True)
is_external = Column(Boolean, default=False)
contact_type = Column(String)
email = Column(String)
company = Column(String)
notes = Column(String)
owner = Column(String)
class ResourceMixin(TimeStampMixin):
resource_type = Column(String)
resource_id = Column(String)
weblink = Column(String)
class DispatchBase(BaseModel):
class Config:
orm_mode = True
validate_assignment = True
arbitrary_types_allowed = True
class ResourceBase(DispatchBase):
resource_type: Optional[str] = None
resource_id: Optional[str] = None
weblink: Optional[str] = None
@validator("weblink")
def sanitize_weblink(cls, v):
if validators.url(v):
return v
raise ValueError("Weblink must be a valid url.")
class ContactBase(DispatchBase):
email: str
name: Optional[str] = None
is_active: Optional[bool] = True
is_external: Optional[bool] = False
company: Optional[str] = None
contact_type: Optional[str] = None
notes: Optional[str] = None
owner: Optional[str] = None
class PluginOptionModel(DispatchBase):
pass
class TermNested(DispatchBase):
id: Optional[int]
text: str
class DefinitionNested(DispatchBase):
id: Optional[int]
text: str
terms: Optional[List["TermNested"]] = []
class ServiceNested(DispatchBase):
pass
class IndividualNested(DispatchBase):
pass
class TeamNested(DispatchBase):
pass
class TermReadNested(DispatchBase):
id: int
text: str
class DefinitionReadNested(DispatchBase):
id: int
text: str
class ServiceReadNested(DispatchBase):
name: Optional[str] = None
external_id: Optional[str] = None
is_active: Optional[bool] = None
type: Optional[str] = None
class IndividualReadNested(ContactBase):
id: Optional[int]
title: Optional[str] = None
external_id: Optional[str]
weblink: Optional[str]
title: Optional[str]
class TeamReadNested(ContactBase):
pass
| true | true |
f7284ff02950f77f56cfd81b481051562718552f | 4,423 | py | Python | bioconda_utils/graph.py | alshai/bioconda-utils | 68bbf927ac1996d5c93a5583ed0bbe9d7eacc821 | [
"MIT"
] | 90 | 2016-07-16T02:52:15.000Z | 2022-03-17T06:09:41.000Z | bioconda_utils/graph.py | alshai/bioconda-utils | 68bbf927ac1996d5c93a5583ed0bbe9d7eacc821 | [
"MIT"
] | 550 | 2016-05-29T16:07:13.000Z | 2022-03-25T00:06:47.000Z | bioconda_utils/graph.py | alshai/bioconda-utils | 68bbf927ac1996d5c93a5583ed0bbe9d7eacc821 | [
"MIT"
] | 113 | 2016-09-04T22:02:33.000Z | 2022-03-30T20:00:32.000Z | """
Construction and Manipulation of Package/Recipe Graphs
"""
import logging
from collections import defaultdict
from fnmatch import fnmatch
from itertools import chain
import networkx as nx
from . import utils
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def build(recipes, config, blacklist=None, restrict=True):
"""
Returns the DAG of recipe paths and a dictionary that maps package names to
lists of recipe paths to all defined versions of the package. defined
versions.
Parameters
----------
recipes : iterable
An iterable of recipe paths, typically obtained via `get_recipes()`
blacklist : set
Package names to skip
restrict : bool
If True, then dependencies will be included in the DAG only if they are
themselves in `recipes`. Otherwise, include all dependencies of
`recipes`.
Returns
-------
dag : nx.DiGraph
Directed graph of packages -- nodes are package names; edges are
dependencies (both run and build dependencies)
name2recipe : dict
Dictionary mapping package names to recipe paths. These recipe path
values are lists and contain paths to all defined versions.
"""
logger.info("Generating DAG")
recipes = list(recipes)
metadata = list(utils.parallel_iter(utils.load_meta_fast, recipes, "Loading Recipes"))
if blacklist is None:
blacklist = set()
# name2recipe is meta.yaml's package:name mapped to the recipe path.
#
# A name should map to exactly one recipe. It is possible for multiple
# names to map to the same recipe, if the package name somehow depends on
# the environment.
#
# Note that this may change once we support conda-build 3.
name2recipe = defaultdict(set)
for meta, recipe in metadata:
name = meta["package"]["name"]
if name not in blacklist:
name2recipe[name].update([recipe])
def get_deps(meta, sec):
reqs = meta.get("requirements")
if not reqs:
return []
deps = reqs.get(sec)
if not deps:
return []
return [dep.split()[0] for dep in deps if dep]
def get_inner_deps(dependencies):
dependencies = list(dependencies)
for dep in dependencies:
if dep in name2recipe or not restrict:
yield dep
dag = nx.DiGraph()
dag.add_nodes_from(meta["package"]["name"]
for meta, recipe in metadata)
for meta, recipe in metadata:
name = meta["package"]["name"]
dag.add_edges_from(
(dep, name)
for dep in set(chain(
get_inner_deps(get_deps(meta, "build")),
get_inner_deps(get_deps(meta, "host")),
))
)
return dag, name2recipe
def build_from_recipes(recipes):
logger.info("Building Recipe DAG")
package2recipes = {}
recipe_list = []
for recipe in recipes:
for package in recipe.package_names:
package2recipes.setdefault(package, set()).add(recipe)
recipe_list.append(recipe)
dag = nx.DiGraph()
dag.add_nodes_from(recipe for recipe in recipe_list)
dag.add_edges_from(
(recipe2, recipe)
for recipe in recipe_list
for dep in recipe.get_deps()
for recipe2 in package2recipes.get(dep, [])
)
logger.info("Building Recipe DAG: done (%i nodes, %i edges)", len(dag), len(dag.edges()))
return dag
def filter_recipe_dag(dag, include, exclude):
"""Reduces **dag** to packages in **names** and their requirements"""
nodes = set()
for recipe in dag:
if (recipe not in nodes
and any(fnmatch(recipe.reldir, p) for p in include)
and not any(fnmatch(recipe.reldir, p) for p in exclude)):
nodes.add(recipe)
nodes |= nx.ancestors(dag, recipe)
return nx.subgraph(dag, nodes)
def filter(dag, packages):
nodes = set()
for package in packages:
if package in nodes:
continue # already got all ancestors
nodes.add(package)
try:
nodes |= nx.ancestors(dag, package)
except nx.exception.NetworkXError:
if package not in nx.nodes(dag):
logger.error("Can't find %s in dag", package)
else:
raise
return nx.subgraph(dag, nodes)
| 29.885135 | 93 | 0.625141 |
import logging
from collections import defaultdict
from fnmatch import fnmatch
from itertools import chain
import networkx as nx
from . import utils
logger = logging.getLogger(__name__)
def build(recipes, config, blacklist=None, restrict=True):
logger.info("Generating DAG")
recipes = list(recipes)
metadata = list(utils.parallel_iter(utils.load_meta_fast, recipes, "Loading Recipes"))
if blacklist is None:
blacklist = set()
#
# A name should map to exactly one recipe. It is possible for multiple
# names to map to the same recipe, if the package name somehow depends on
# the environment.
#
# Note that this may change once we support conda-build 3.
name2recipe = defaultdict(set)
for meta, recipe in metadata:
name = meta["package"]["name"]
if name not in blacklist:
name2recipe[name].update([recipe])
def get_deps(meta, sec):
reqs = meta.get("requirements")
if not reqs:
return []
deps = reqs.get(sec)
if not deps:
return []
return [dep.split()[0] for dep in deps if dep]
def get_inner_deps(dependencies):
dependencies = list(dependencies)
for dep in dependencies:
if dep in name2recipe or not restrict:
yield dep
dag = nx.DiGraph()
dag.add_nodes_from(meta["package"]["name"]
for meta, recipe in metadata)
for meta, recipe in metadata:
name = meta["package"]["name"]
dag.add_edges_from(
(dep, name)
for dep in set(chain(
get_inner_deps(get_deps(meta, "build")),
get_inner_deps(get_deps(meta, "host")),
))
)
return dag, name2recipe
def build_from_recipes(recipes):
logger.info("Building Recipe DAG")
package2recipes = {}
recipe_list = []
for recipe in recipes:
for package in recipe.package_names:
package2recipes.setdefault(package, set()).add(recipe)
recipe_list.append(recipe)
dag = nx.DiGraph()
dag.add_nodes_from(recipe for recipe in recipe_list)
dag.add_edges_from(
(recipe2, recipe)
for recipe in recipe_list
for dep in recipe.get_deps()
for recipe2 in package2recipes.get(dep, [])
)
logger.info("Building Recipe DAG: done (%i nodes, %i edges)", len(dag), len(dag.edges()))
return dag
def filter_recipe_dag(dag, include, exclude):
nodes = set()
for recipe in dag:
if (recipe not in nodes
and any(fnmatch(recipe.reldir, p) for p in include)
and not any(fnmatch(recipe.reldir, p) for p in exclude)):
nodes.add(recipe)
nodes |= nx.ancestors(dag, recipe)
return nx.subgraph(dag, nodes)
def filter(dag, packages):
nodes = set()
for package in packages:
if package in nodes:
continue # already got all ancestors
nodes.add(package)
try:
nodes |= nx.ancestors(dag, package)
except nx.exception.NetworkXError:
if package not in nx.nodes(dag):
logger.error("Can't find %s in dag", package)
else:
raise
return nx.subgraph(dag, nodes)
| true | true |
f72850f949715caa7282e6ca85f148a75ebc1074 | 12,807 | py | Python | python/paddle/fluid/tests/unittests/test_dist_fleet_base.py | donproc/Paddle | 23261ff44ba46a507e72e2da7c83f7fede3486f7 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_dist_fleet_base.py | donproc/Paddle | 23261ff44ba46a507e72e2da7c83f7fede3486f7 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_dist_fleet_base.py | donproc/Paddle | 23261ff44ba46a507e72e2da7c83f7fede3486f7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
"""
high level unit test for distribute fleet.
"""
import os
import sys
import subprocess
import six
import shutil
import numpy as np
import argparse
from contextlib import closing
import socket
import time
import tempfile
import unittest
import paddle.fluid as fluid
import paddle.distributed.fleet.base.role_maker as role_maker
from paddle.distributed.fleet.base.util_factory import fleet_util
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory
__all__ = ['FleetDistRunnerBase', 'TestFleetBase', 'runtime_main']
RUN_STEP = 5
LEARNING_RATE = 0.01
DIST_UT_PORT = 0
class FleetDistRunnerBase(object):
"""
run_pserver,run_trainer : after init role, using transpiler split program
net : implment by child class, the network of model
do training : exe run program
"""
def build_role(self, args):
if args.role.upper() == "PSERVER":
role = role_maker.UserDefinedRoleMaker(
is_collective=False,
init_gloo=True,
path=args.gloo_path,
current_id=args.current_id,
role=role_maker.Role.SERVER,
worker_endpoints=args.trainer_endpoints.split(","),
server_endpoints=args.endpoints.split(","))
else:
role = role_maker.UserDefinedRoleMaker(
is_collective=False,
init_gloo=True,
path=args.gloo_path,
current_id=args.current_id,
role=role_maker.Role.WORKER,
worker_endpoints=args.trainer_endpoints.split(","),
server_endpoints=args.endpoints.split(","))
self.role = role
return role
def build_strategy(self, args):
self.strategy = None
if args.mode == "async":
self.strategy = StrategyFactory.create_async_strategy()
elif args.mode == "sync":
self.strategy = StrategyFactory.create_sync_strategy()
elif args.mode == "half_async":
self.strategy = StrategyFactory.create_half_async_strategy()
elif args.mode == "geo":
self.strategy = StrategyFactory.create_geo_strategy(
args.geo_sgd_need_push_nums)
self.dump_param = os.getenv("dump_param", "").split(",")
self.dump_fields = os.getenv("dump_fields", "").split(",")
self.dump_fields_path = os.getenv("dump_fields_path", "")
debug = int(os.getenv("Debug", "0"))
if debug:
self.strategy.set_debug_opt({
"dump_param": self.dump_param,
"dump_fields": self.dump_fields,
"dump_fields_path": self.dump_fields_path
})
return self.strategy
def build_optimizer(self, avg_cost, strategy):
use_grad_clip = int(os.getenv('GRAD_CLIP', 0))
if use_grad_clip:
# 1: clip_by_value; 2: clip_by_norm; 3:clip_by_global_norm
if use_grad_clip == 1:
fluid.clip.set_gradient_clip(
clip=fluid.clip.GradientClipByValue(2.0))
elif use_grad_clip == 2:
fluid.clip.set_gradient_clip(
clip=fluid.clip.GradientClipByNorm(2.0))
elif use_grad_clip == 3:
fluid.clip.set_gradient_clip(
clip=fluid.clip.GradientClipByGlobalNorm(2.0))
use_decay = int(os.getenv("DECAY", "0"))
if use_decay:
optimizer = fluid.optimizer.SGD(
learning_rate=fluid.layers.exponential_decay(
learning_rate=LEARNING_RATE,
decay_steps=500,
decay_rate=0.969,
staircase=True))
else:
optimizer = fluid.optimizer.SGD(LEARNING_RATE)
optimizer = fleet.distributed_optimizer(optimizer, strategy)
optimizer.minimize(avg_cost)
def run_pserver(self, args):
fleet.init_server()
fleet.run_server()
def run_dataset_trainer(self, args):
out = self.do_dataset_training(fleet)
def run_pyreader_trainer(self, args):
out = self.do_pyreader_training(fleet)
def net(self, args, batch_size=4, lr=0.01):
raise NotImplementedError(
"get_model should be implemented by child classes.")
def do_dataset_training(self, fleet):
raise NotImplementedError(
"do_dataset_training should be implemented by child classes.")
def do_pyreader_training(self, fleet):
raise NotImplementedError(
"do_pyreader_training should be implemented by child classes.")
class TestFleetBase(unittest.TestCase):
"""
start_pserver,start_trainer : add start cmd to test
run_cluster : using multi process to test distribute program
"""
def _setup_config(self):
raise NotImplementedError("tests should have _setup_config implemented")
def setUp(self):
self._mode = "sync"
self._reader = "pyreader"
self._trainers = 2
self._pservers = 2
self._port_set = set()
global DIST_UT_PORT
if DIST_UT_PORT == 0 and os.getenv("PADDLE_DIST_UT_PORT"):
DIST_UT_PORT = int(os.getenv("PADDLE_DIST_UT_PORT"))
if DIST_UT_PORT:
print("set begin_port:", DIST_UT_PORT)
self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
DIST_UT_PORT, DIST_UT_PORT + 1)
self._tr_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
DIST_UT_PORT + 2, DIST_UT_PORT + 3)
DIST_UT_PORT += 4
else:
self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
self._find_free_port(), self._find_free_port())
self._tr_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
self._find_free_port(), self._find_free_port())
self._python_interp = sys.executable
self._geo_sgd_need_push_nums = 5
self._grad_clip_mode = 0
self._setup_config()
def _find_free_port(self):
def __free_port():
with closing(socket.socket(socket.AF_INET,
socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
while True:
port = __free_port()
if port not in self._port_set:
self._port_set.add(port)
return port
def _start_pserver(self, cmd, required_envs):
ps0_cmd, ps1_cmd = cmd.format(0), cmd.format(1)
ps0_pipe = open(tempfile.gettempdir() + "/ps0_err.log", "wb+")
ps1_pipe = open(tempfile.gettempdir() + "/ps1_err.log", "wb+")
ps0_proc = subprocess.Popen(
ps0_cmd.strip().split(" "),
stdout=subprocess.PIPE,
stderr=ps0_pipe,
env=required_envs)
ps1_proc = subprocess.Popen(
ps1_cmd.strip().split(" "),
stdout=subprocess.PIPE,
stderr=ps1_pipe,
env=required_envs)
return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe
def _start_trainer(self, cmd, required_envs):
tr0_cmd, tr1_cmd = cmd.format(0), cmd.format(1)
tr0_pipe = open(tempfile.gettempdir() + "/tr0_err.log", "wb+")
tr1_pipe = open(tempfile.gettempdir() + "/tr1_err.log", "wb+")
tr0_proc = subprocess.Popen(
tr0_cmd.strip().split(" "),
stdout=subprocess.PIPE,
stderr=tr0_pipe,
env=required_envs)
tr1_proc = subprocess.Popen(
tr1_cmd.strip().split(" "),
stdout=subprocess.PIPE,
stderr=tr1_pipe,
env=required_envs)
return tr0_proc, tr1_proc, tr0_pipe, tr1_pipe
def _run_cluster(self, model, envs):
env = {'GRAD_CLIP': str(self._grad_clip_mode)}
python_path = self._python_interp
gloo_path = tempfile.mkdtemp()
if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
python_path += " -m coverage run --branch -p"
env.update(envs)
tr_cmd = "{0} {1} --role trainer --endpoints {2} --trainer_endpoints {3} --current_id {{}} --trainers {4} --mode {5} --geo_sgd_need_push_nums {6} --reader {7} --gloo_path {8}".format(
python_path, model, self._ps_endpoints, self._tr_endpoints,
self._trainers, self._mode, self._geo_sgd_need_push_nums,
self._reader, gloo_path)
ps_cmd = "{0} {1} --role pserver --endpoints {2} --trainer_endpoints {3} --current_id {{}} --trainers {4} --mode {5} --geo_sgd_need_push_nums {6} --reader {7} --gloo_path {8}".format(
python_path, model, self._ps_endpoints, self._tr_endpoints,
self._trainers, self._mode, self._geo_sgd_need_push_nums,
self._reader, gloo_path)
# Run dist train to compare with local results
ps0, ps1, ps0_pipe, ps1_pipe = self._start_pserver(ps_cmd, env)
tr0, tr1, tr0_pipe, tr1_pipe = self._start_trainer(tr_cmd, env)
# Wait until trainer process terminate
while True:
stat0 = tr0.poll()
time.sleep(0.1)
if stat0 is not None:
break
while True:
stat1 = tr1.poll()
time.sleep(0.1)
if stat1 is not None:
break
tr0_out, tr0_err = tr0.communicate()
tr1_out, tr1_err = tr1.communicate()
tr0_ret = tr0.returncode
tr1_ret = tr0.returncode
self.assertEqual(tr0_ret, 0, "something wrong in tr0, please check")
self.assertEqual(tr1_ret, 0, "something wrong in tr1, please check")
# close trainer file
tr0_pipe.close()
tr1_pipe.close()
ps0_pipe.close()
ps1_pipe.close()
ps0.terminate()
ps1.terminate()
shutil.rmtree(gloo_path)
return 0, 0
def check_with_place(self,
model_file,
delta=1e-3,
check_error_log=False,
need_envs={}):
required_envs = {
"PATH": os.getenv("PATH", ""),
"PYTHONPATH": os.getenv("PYTHONPATH", ""),
"LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
"FLAGS_rpc_deadline": "5000", # 5sec to fail fast
"http_proxy": ""
}
required_envs.update(need_envs)
if check_error_log:
required_envs["GLOG_v"] = "3"
required_envs["GLOG_logtostderr"] = "1"
tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs)
def runtime_main(test_class):
parser = argparse.ArgumentParser(description='Run Fleet test.')
parser.add_argument(
'--role', type=str, required=True, choices=['pserver', 'trainer'])
parser.add_argument('--endpoints', type=str, required=False, default="")
parser.add_argument(
'--trainer_endpoints', type=str, required=False, default="")
parser.add_argument('--gloo_path', type=str, required=False, default="")
parser.add_argument('--current_id', type=int, required=False, default=0)
parser.add_argument('--trainers', type=int, required=False, default=1)
parser.add_argument('--mode', type=str, required=False, default='geo')
parser.add_argument(
'--geo_sgd_need_push_nums', type=int, required=False, default=2)
parser.add_argument('--reader', type=str, required=False, default='dataset')
args = parser.parse_args()
model = test_class()
role = model.build_role(args)
fleet.init(role)
strategy = model.build_strategy(args)
avg_cost = model.net(args)
model.build_optimizer(avg_cost, strategy)
fleet_util._set_strategy(strategy)
fleet_util._set_role_maker(role)
if args.role == "pserver":
model.run_pserver(args)
else:
if args.reader == "dataset":
model.run_dataset_trainer(args)
else:
model.run_pyreader_trainer(args)
| 36.591429 | 191 | 0.612165 |
from __future__ import print_function
import os
import sys
import subprocess
import six
import shutil
import numpy as np
import argparse
from contextlib import closing
import socket
import time
import tempfile
import unittest
import paddle.fluid as fluid
import paddle.distributed.fleet.base.role_maker as role_maker
from paddle.distributed.fleet.base.util_factory import fleet_util
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory
__all__ = ['FleetDistRunnerBase', 'TestFleetBase', 'runtime_main']
RUN_STEP = 5
LEARNING_RATE = 0.01
DIST_UT_PORT = 0
class FleetDistRunnerBase(object):
    """Base runner executed inside each spawned trainer/pserver process.

    Subclasses implement `net`, `do_dataset_training` and
    `do_pyreader_training`; `runtime_main` drives the lifecycle using the
    parsed command-line `args`.
    """
    def build_role(self, args):
        """Build and remember a UserDefinedRoleMaker from the CLI args.

        The role is SERVER when --role is 'pserver' (case-insensitive),
        WORKER otherwise.
        """
        if args.role.upper() == "PSERVER":
            role = role_maker.UserDefinedRoleMaker(
                is_collective=False,
                init_gloo=True,
                path=args.gloo_path,
                current_id=args.current_id,
                role=role_maker.Role.SERVER,
                worker_endpoints=args.trainer_endpoints.split(","),
                server_endpoints=args.endpoints.split(","))
        else:
            role = role_maker.UserDefinedRoleMaker(
                is_collective=False,
                init_gloo=True,
                path=args.gloo_path,
                current_id=args.current_id,
                role=role_maker.Role.WORKER,
                worker_endpoints=args.trainer_endpoints.split(","),
                server_endpoints=args.endpoints.split(","))
        self.role = role
        return role
    def build_strategy(self, args):
        """Create the distributed strategy selected by --mode.

        Also reads dump_* settings and the Debug flag from environment
        variables; when Debug is truthy, the dump options are attached to
        the strategy.  Note: an unknown --mode leaves self.strategy as None.
        """
        self.strategy = None
        if args.mode == "async":
            self.strategy = StrategyFactory.create_async_strategy()
        elif args.mode == "sync":
            self.strategy = StrategyFactory.create_sync_strategy()
        elif args.mode == "half_async":
            self.strategy = StrategyFactory.create_half_async_strategy()
        elif args.mode == "geo":
            self.strategy = StrategyFactory.create_geo_strategy(
                args.geo_sgd_need_push_nums)
        # Empty env vars yield [''] after split; downstream presumably
        # tolerates that -- TODO confirm.
        self.dump_param = os.getenv("dump_param", "").split(",")
        self.dump_fields = os.getenv("dump_fields", "").split(",")
        self.dump_fields_path = os.getenv("dump_fields_path", "")
        debug = int(os.getenv("Debug", "0"))
        if debug:
            self.strategy.set_debug_opt({
                "dump_param": self.dump_param,
                "dump_fields": self.dump_fields,
                "dump_fields_path": self.dump_fields_path
            })
        return self.strategy
    def build_optimizer(self, avg_cost, strategy):
        """Build an SGD optimizer, wrap it for fleet, and minimize avg_cost.

        GRAD_CLIP env var (1/2/3) selects by-value / by-norm / by-global-norm
        clipping; DECAY enables exponential learning-rate decay.
        """
        use_grad_clip = int(os.getenv('GRAD_CLIP', 0))
        if use_grad_clip:
            if use_grad_clip == 1:
                fluid.clip.set_gradient_clip(
                    clip=fluid.clip.GradientClipByValue(2.0))
            elif use_grad_clip == 2:
                fluid.clip.set_gradient_clip(
                    clip=fluid.clip.GradientClipByNorm(2.0))
            elif use_grad_clip == 3:
                fluid.clip.set_gradient_clip(
                    clip=fluid.clip.GradientClipByGlobalNorm(2.0))
        use_decay = int(os.getenv("DECAY", "0"))
        if use_decay:
            optimizer = fluid.optimizer.SGD(
                learning_rate=fluid.layers.exponential_decay(
                    learning_rate=LEARNING_RATE,
                    decay_steps=500,
                    decay_rate=0.969,
                    staircase=True))
        else:
            optimizer = fluid.optimizer.SGD(LEARNING_RATE)
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(avg_cost)
    def run_pserver(self, args):
        # Blocks serving parameters until the fleet shuts the server down.
        fleet.init_server()
        fleet.run_server()
    def run_dataset_trainer(self, args):
        # Delegates to the subclass; the return value is currently unused.
        out = self.do_dataset_training(fleet)
    def run_pyreader_trainer(self, args):
        # Delegates to the subclass; the return value is currently unused.
        out = self.do_pyreader_training(fleet)
    def net(self, args, batch_size=4, lr=0.01):
        """Build the model network and return the average cost variable."""
        raise NotImplementedError(
            "get_model should be implemented by child classes.")
    def do_dataset_training(self, fleet):
        """Run training with the dataset reader."""
        raise NotImplementedError(
            "do_dataset_training should be implemented by child classes.")
    def do_pyreader_training(self, fleet):
        """Run training with the pyreader reader."""
        raise NotImplementedError(
            "do_pyreader_training should be implemented by child classes.")
class TestFleetBase(unittest.TestCase):
    """Base test case that launches 2 pservers + 2 trainers as subprocesses.

    Subclasses must implement `_setup_config` to choose mode/reader etc.
    Endpoints come either from the PADDLE_DIST_UT_PORT base port or from
    freshly bound free ports.
    """
    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")
    def setUp(self):
        """Pick endpoints/ports and let the subclass adjust the config."""
        self._mode = "sync"
        self._reader = "pyreader"
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        global DIST_UT_PORT
        if DIST_UT_PORT == 0 and os.getenv("PADDLE_DIST_UT_PORT"):
            DIST_UT_PORT = int(os.getenv("PADDLE_DIST_UT_PORT"))
        if DIST_UT_PORT:
            # Deterministic ports: 2 for pservers, 2 for trainers, then
            # advance the shared base so the next test gets fresh ports.
            print("set begin_port:", DIST_UT_PORT)
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                DIST_UT_PORT, DIST_UT_PORT + 1)
            self._tr_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                DIST_UT_PORT + 2, DIST_UT_PORT + 3)
            DIST_UT_PORT += 4
        else:
            self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                self._find_free_port(), self._find_free_port())
            self._tr_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
                self._find_free_port(), self._find_free_port())
        self._python_interp = sys.executable
        self._geo_sgd_need_push_nums = 5
        self._grad_clip_mode = 0
        self._setup_config()
    def _find_free_port(self):
        """Return a free TCP port not handed out before by this test case."""
        def __free_port():
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                return s.getsockname()[1]
        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port
    def _start_pserver(self, cmd, required_envs):
        """Spawn pserver 0 and 1; returns (proc0, proc1, errlog0, errlog1)."""
        ps0_cmd, ps1_cmd = cmd.format(0), cmd.format(1)
        # stderr goes to per-process log files for post-mortem debugging.
        ps0_pipe = open(tempfile.gettempdir() + "/ps0_err.log", "wb+")
        ps1_pipe = open(tempfile.gettempdir() + "/ps1_err.log", "wb+")
        ps0_proc = subprocess.Popen(
            ps0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps0_pipe,
            env=required_envs)
        ps1_proc = subprocess.Popen(
            ps1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=ps1_pipe,
            env=required_envs)
        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe
    def _start_trainer(self, cmd, required_envs):
        """Spawn trainer 0 and 1; returns (proc0, proc1, errlog0, errlog1)."""
        tr0_cmd, tr1_cmd = cmd.format(0), cmd.format(1)
        tr0_pipe = open(tempfile.gettempdir() + "/tr0_err.log", "wb+")
        tr1_pipe = open(tempfile.gettempdir() + "/tr1_err.log", "wb+")
        tr0_proc = subprocess.Popen(
            tr0_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr0_pipe,
            env=required_envs)
        tr1_proc = subprocess.Popen(
            tr1_cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=tr1_pipe,
            env=required_envs)
        return tr0_proc, tr1_proc, tr0_pipe, tr1_pipe
    def _run_cluster(self, model, envs):
        """Run `model` as 2 pservers + 2 trainers and wait for the trainers.

        Fails the test if either trainer exits non-zero; pservers are
        terminated once both trainers finished.
        """
        env = {'GRAD_CLIP': str(self._grad_clip_mode)}
        python_path = self._python_interp
        gloo_path = tempfile.mkdtemp()
        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            python_path += " -m coverage run --branch -p"
        env.update(envs)
        tr_cmd = "{0} {1} --role trainer --endpoints {2} --trainer_endpoints {3} --current_id {{}} --trainers {4} --mode {5} --geo_sgd_need_push_nums {6} --reader {7} --gloo_path {8}".format(
            python_path, model, self._ps_endpoints, self._tr_endpoints,
            self._trainers, self._mode, self._geo_sgd_need_push_nums,
            self._reader, gloo_path)
        ps_cmd = "{0} {1} --role pserver --endpoints {2} --trainer_endpoints {3} --current_id {{}} --trainers {4} --mode {5} --geo_sgd_need_push_nums {6} --reader {7} --gloo_path {8}".format(
            python_path, model, self._ps_endpoints, self._tr_endpoints,
            self._trainers, self._mode, self._geo_sgd_need_push_nums,
            self._reader, gloo_path)
        ps0, ps1, ps0_pipe, ps1_pipe = self._start_pserver(ps_cmd, env)
        tr0, tr1, tr0_pipe, tr1_pipe = self._start_trainer(tr_cmd, env)
        # Poll each trainer until it exits (pservers only stop when killed).
        while True:
            stat0 = tr0.poll()
            time.sleep(0.1)
            if stat0 is not None:
                break
        while True:
            stat1 = tr1.poll()
            time.sleep(0.1)
            if stat1 is not None:
                break
        tr0_out, tr0_err = tr0.communicate()
        tr1_out, tr1_err = tr1.communicate()
        tr0_ret = tr0.returncode
        # BUG FIX: previously read tr0.returncode here, so a failure in
        # trainer 1 could never be detected.
        tr1_ret = tr1.returncode
        self.assertEqual(tr0_ret, 0, "something wrong in tr0, please check")
        self.assertEqual(tr1_ret, 0, "something wrong in tr1, please check")
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()
        ps0.terminate()
        ps1.terminate()
        shutil.rmtree(gloo_path)
        return 0, 0
    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs=None):
        """Run the cluster for `model_file` with standard env plus overrides.

        `need_envs` defaults to an empty mapping (the previous `{}` default
        was a shared mutable default argument).
        """
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_rpc_deadline": "5000",
            "http_proxy": ""
        }
        required_envs.update(need_envs or {})
        if check_error_log:
            # Verbose glog output to stderr for debugging failing runs.
            required_envs["GLOG_v"] = "3"
            required_envs["GLOG_logtostderr"] = "1"
        tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs)
def runtime_main(test_class):
    """CLI entry point executed inside each spawned pserver/trainer process.

    Parses the flags produced by TestFleetBase._run_cluster, builds the role,
    strategy, network and optimizer (in that order, with fleet initialized
    right after the role), then runs as a pserver or as a trainer with the
    selected reader.
    """
    parser = argparse.ArgumentParser(description='Run Fleet test.')
    parser.add_argument(
        '--role', type=str, required=True, choices=['pserver', 'trainer'])
    parser.add_argument('--endpoints', type=str, required=False, default="")
    parser.add_argument(
        '--trainer_endpoints', type=str, required=False, default="")
    parser.add_argument('--gloo_path', type=str, required=False, default="")
    parser.add_argument('--current_id', type=int, required=False, default=0)
    parser.add_argument('--trainers', type=int, required=False, default=1)
    parser.add_argument('--mode', type=str, required=False, default='geo')
    parser.add_argument(
        '--geo_sgd_need_push_nums', type=int, required=False, default=2)
    parser.add_argument('--reader', type=str, required=False, default='dataset')
    args = parser.parse_args()
    model = test_class()
    role = model.build_role(args)
    # fleet must know the role before the strategy/model/optimizer are built.
    fleet.init(role)
    strategy = model.build_strategy(args)
    avg_cost = model.net(args)
    model.build_optimizer(avg_cost, strategy)
    fleet_util._set_strategy(strategy)
    fleet_util._set_role_maker(role)
    if args.role == "pserver":
        model.run_pserver(args)
    else:
        if args.reader == "dataset":
            model.run_dataset_trainer(args)
        else:
            model.run_pyreader_trainer(args)
| true | true |
f72850feefd4a690b228282eb0040c8340c2bdc4 | 2,251 | py | Python | source/flask_api.py | PhoxSpark/pytom2 | 2ef46342d5fc0e98e6d059c86538035d6c758636 | [
"Apache-2.0"
] | null | null | null | source/flask_api.py | PhoxSpark/pytom2 | 2ef46342d5fc0e98e6d059c86538035d6c758636 | [
"Apache-2.0"
] | null | null | null | source/flask_api.py | PhoxSpark/pytom2 | 2ef46342d5fc0e98e6d059c86538035d6c758636 | [
"Apache-2.0"
] | null | null | null | """
Flask API main module.
"""
from __future__ import absolute_import
import logging
from flask import Flask
from flask_restplus import Api, Resource, fields
from pytom2.source.pdb_parser_module import PDB
# Module-level Flask/flask-restplus wiring; runs once at import time.
logging.info("Initializing Flask objects...")
APP = Flask(__name__)
API = Api(APP)
logging.info("Initializing model for organism and dictionary...")
# Request schema for POST /pytom: {"organism": "<name>"}.
MODEL_ORGANISM = API.model("Organism", {"organism" : fields.String("Organism name")})
# In-memory store mapping organism name -> parsed PDB dictionary (no persistence).
ORGANISMS = {}
logging.info("Initializing Swagger UI...")
# Enable the JSON editor in the Swagger UI served by flask-restplus.
APP.config["SWAGGER_UI_JSONEDITOR"] = True
@API.route('/pytom')
class Pytom(Resource):
    """
    REST resource mounted at /pytom.

    GET returns the whole in-memory ORGANISMS store; POST parses the PDB
    entry for the posted organism name and caches it in ORGANISMS.
    """
    def get(self): #pylint: disable=R0201
        """
        Return the full ORGANISMS mapping as JSON.
        """
        logging.info("GET Request received, creating parser...")
        # NOTE(review): this parser is built but parse_args() is never called,
        # so the 'user' query argument does not filter the response --
        # presumably leftover scaffolding; confirm before removing.
        parser = API.parser()
        logging.info("Adding arguments to parser...")
        parser.add_argument('user', location='args', help='Queried user')
        logging.info("Returning results...")
        return ORGANISMS
    @API.expect(MODEL_ORGANISM)
    def post(self): #pylint: disable=R0201
        """
        Parse the organism named in the payload and store its PDB dictionary.

        Returns a confirmation body with HTTP 201.
        """
        logging.info("POST request received, creating payload...")
        new_organism = API.payload
        logging.info("Setting ID of new organism to %i...", len(ORGANISMS) + 1)
        # The id is derived from the current store size and attached to the
        # payload dict only; it is not stored in ORGANISMS itself.
        new_organism["id"] = len(ORGANISMS) + 1
        logging.info("Creating new object organism...")
        pdb_object = PDB(new_organism["organism"])
        logging.info("Setting payload data...")
        # Keyed by organism name, so a repeated POST overwrites the entry.
        ORGANISMS[new_organism["organism"]] = pdb_object.pdb_dictionary
        logging.info("Returning results...")
        return {"result" : "organism added"}, 201
def start_api():
    """
    Start the Flask development server (blocking call).

    NOTE(review): debug=True enables the Werkzeug debugger/reloader and
    should not be used in production.
    """
    logging.info("Starting flask API RestPlus")
    APP.run(debug=True)
| 30.835616 | 103 | 0.569525 | from __future__ import absolute_import
import logging
from flask import Flask
from flask_restplus import Api, Resource, fields
from pytom2.source.pdb_parser_module import PDB
logging.info("Initializing Flask objects...")
APP = Flask(__name__)
API = Api(APP)
logging.info("Initializing model for organism and dictionary...")
MODEL_ORGANISM = API.model("Organism", {"organism" : fields.String("Organism name")})
ORGANISMS = {}
logging.info("Initializing Swagger UI...")
APP.config["SWAGGER_UI_JSONEDITOR"] = True
@API.route('/pytom')
class Pytom(Resource):
def get(self):
logging.info("GET Request received, creating parser...")
parser = API.parser()
logging.info("Adding arguments to parser...")
parser.add_argument('user', location='args', help='Queried user')
logging.info("Returning results...")
return ORGANISMS
@API.expect(MODEL_ORGANISM)
def post(self):
logging.info("POST request received, creating payload...")
new_organism = API.payload
logging.info("Setting ID of new organism to %i...", len(ORGANISMS) + 1)
new_organism["id"] = len(ORGANISMS) + 1
logging.info("Creating new object organism...")
pdb_object = PDB(new_organism["organism"])
logging.info("Setting payload data...")
ORGANISMS[new_organism["organism"]] = pdb_object.pdb_dictionary
logging.info("Returning results...")
return {"result" : "organism added"}, 201
def start_api():
logging.info("Starting flask API RestPlus")
APP.run(debug=True)
| true | true |
f72851af1ca5aeba4f4a0a24a60e1fb3cad1b327 | 5,204 | py | Python | Sprint3 Creating Redshift Cluster.py | Jeremy-Tian/Data-Lake | 62d2aad31e924ffc536cca98001da7671a7a9fde | [
"MIT"
] | null | null | null | Sprint3 Creating Redshift Cluster.py | Jeremy-Tian/Data-Lake | 62d2aad31e924ffc536cca98001da7671a7a9fde | [
"MIT"
] | null | null | null | Sprint3 Creating Redshift Cluster.py | Jeremy-Tian/Data-Lake | 62d2aad31e924ffc536cca98001da7671a7a9fde | [
"MIT"
] | null | null | null |
import pandas as pd
import boto3
import json
import configparser

# Load AWS credentials and data-warehouse parameters from dwh.cfg.
config = configparser.ConfigParser()
# Use a context manager so the file handle is closed (the original passed a
# bare open() to read_file and leaked it).
with open('dwh.cfg') as cfg_file:
    config.read_file(cfg_file)

KEY = config.get('AWS', 'KEY')
SECRET = config.get('AWS', 'SECRET')
DWH_CLUSTER_TYPE = config.get("DWH", "DWH_CLUSTER_TYPE")
DWH_NUM_NODES = config.get("DWH", "DWH_NUM_NODES")
DWH_NODE_TYPE = config.get("DWH", "DWH_NODE_TYPE")
DWH_CLUSTER_IDENTIFIER = config.get("DWH", "DWH_CLUSTER_IDENTIFIER")
DWH_DB = config.get("DWH", "DWH_DB")
DWH_DB_USER = config.get("DWH", "DWH_DB_USER")
DWH_DB_PASSWORD = config.get("DWH", "DWH_DB_PASSWORD")
DWH_PORT = config.get("DWH", "DWH_PORT")
DWH_IAM_ROLE_NAME = config.get("DWH", "DWH_IAM_ROLE_NAME")

# Notebook-residue parameter table; the DataFrame is only useful when the
# expression value is displayed interactively.
pd.DataFrame({"Param":
              ["DWH_CLUSTER_TYPE", "DWH_NUM_NODES", "DWH_NODE_TYPE", "DWH_CLUSTER_IDENTIFIER", "DWH_DB", "DWH_DB_USER", "DWH_DB_PASSWORD", "DWH_PORT", "DWH_IAM_ROLE_NAME"],
              "Value":
              [DWH_CLUSTER_TYPE, DWH_NUM_NODES, DWH_NODE_TYPE, DWH_CLUSTER_IDENTIFIER, DWH_DB, DWH_DB_USER, DWH_DB_PASSWORD, DWH_PORT, DWH_IAM_ROLE_NAME]
              })
# # Create clients for IAM, EC2, S3 and Redshift (all in us-west-2).
# The duplicate `import boto3` left over from the notebook export was
# removed; boto3 is already imported at the top of the script.
ec2 = boto3.resource('ec2',
                     region_name="us-west-2",
                     aws_access_key_id=KEY,
                     aws_secret_access_key=SECRET)
s3 = boto3.resource('s3',
                    region_name="us-west-2",
                    aws_access_key_id=KEY,
                    aws_secret_access_key=SECRET)
iam = boto3.client('iam',
                   region_name='us-west-2',
                   aws_access_key_id=KEY,
                   aws_secret_access_key=SECRET)
redshift = boto3.client('redshift',
                        region_name="us-west-2",
                        aws_access_key_id=KEY,
                        aws_secret_access_key=SECRET)

# Sanity check: list the sample-data objects that would be loaded from S3.
sampleDbBucket = s3.Bucket("awssampledbuswest2")
for obj in sampleDbBucket.objects.filter(Prefix="ssbgz"):
    print(obj)
from botocore.exceptions import ClientError

# 1.1 Create the IAM role that allows Redshift to read from S3.
try:
    print("1.1 Creating a new IAM Role")
    dwhRole = iam.create_role(
        Path='/',
        RoleName=DWH_IAM_ROLE_NAME,
        Description="Allows Redshift clusters to call AWS services on your behalf.",
        AssumeRolePolicyDocument=json.dumps(
            {'Statement': [{'Action': 'sts:AssumeRole',
                            'Effect': 'Allow',
                            'Principal': {'Service': 'redshift.amazonaws.com'}}],
             'Version': '2012-10-17'})
    )
except ClientError as e:
    # Narrowed from a bare `except Exception` to use the imported ClientError;
    # typically EntityAlreadyExists on re-runs -- report and continue.
    print(e)

print("1.2 Attaching Policy")
iam.attach_role_policy(RoleName=DWH_IAM_ROLE_NAME,
                       PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
                       )['ResponseMetadata']['HTTPStatusCode']

print("1.3 Get the IAM role ARN")
roleArn = iam.get_role(RoleName=DWH_IAM_ROLE_NAME)['Role']['Arn']
print(roleArn)
# # STEP 2: Redshift Cluster
#
# - Create a RedShift Cluster
# In[83]:
try:
    response = redshift.create_cluster(
        # Hardware configuration
        ClusterType=DWH_CLUSTER_TYPE,
        NodeType=DWH_NODE_TYPE,
        NumberOfNodes=int(DWH_NUM_NODES),
        # Identifiers & credentials
        DBName=DWH_DB,
        ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,
        MasterUsername=DWH_DB_USER,
        MasterUserPassword=DWH_DB_PASSWORD,
        # IAM roles (grants the cluster S3 read access)
        IamRoles=[roleArn]
    )
except Exception as e:
    # Best-effort: e.g. ClusterAlreadyExists on re-runs is just reported.
    print(e)
# ## 2.1 *Describe* the cluster to see its status
def prettyRedshiftProps(props):
    """Return a (Key, Value) DataFrame of the interesting cluster properties.

    props: dict as returned by redshift.describe_clusters()['Clusters'][0].
    Only a fixed whitelist of keys is kept; keys missing from `props` are
    simply omitted.
    """
    # `-1` for max_colwidth was deprecated and later removed from pandas;
    # None is the supported way to disable column truncation.
    pd.set_option('display.max_colwidth', None)
    keysToShow = ["ClusterIdentifier", "NodeType", "ClusterStatus",
                  "MasterUsername", "DBName", "Endpoint", "NumberOfNodes",
                  'VpcId']
    x = [(k, v) for k, v in props.items() if k in keysToShow]
    return pd.DataFrame(data=x, columns=["Key", "Value"])
# Describe the cluster and capture its endpoint and attached role ARN.
myClusterProps = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]
prettyRedshiftProps(myClusterProps)
DWH_ENDPOINT = myClusterProps['Endpoint']['Address']
DWH_ROLE_ARN = myClusterProps['IamRoles'][0]['IamRoleArn']
# Bug fix: the original printed an undefined `endpoint` (NameError) and the
# earlier `roleArn` local instead of the values extracted just above.
print("DWH_ENDPOINT :: ", DWH_ENDPOINT)
print("DWH_ROLE_ARN :: ", DWH_ROLE_ARN)
# ## STEP 3: Open an incoming TCP port to access the cluster ednpoint
# In[84]:
try:
    # Open the cluster VPC's default security group for inbound TCP on the
    # warehouse port.
    vpc = ec2.Vpc(id=myClusterProps['VpcId'])
    defaultSg = list(vpc.security_groups.all())[0]
    print(defaultSg)
    # NOTE(review): CidrIp 0.0.0.0/0 exposes the port to the whole internet;
    # restrict the CIDR for anything beyond a throwaway exercise.
    defaultSg.authorize_ingress(
        GroupName=defaultSg.group_name,
        CidrIp='0.0.0.0/0',
        IpProtocol='TCP',
        FromPort=int(DWH_PORT),
        ToPort=int(DWH_PORT)
    )
except Exception as e:
    # Best-effort: an already-authorized rule raises and is just reported.
    print(e)
# # STEP 4: Make sure you can connect to the cluster
get_ipython().run_line_magic('load_ext', 'sql')
conn_string="postgresql://{}:{}@{}:{}/{}".format(DWH_DB_USER, DWH_DB_PASSWORD, DWH_ENDPOINT, DWH_PORT,DWH_DB)
# NOTE(review): printing conn_string echoes the database password to stdout.
print(conn_string)
get_ipython().run_line_magic('sql', '$conn_string')
| 27.246073 | 176 | 0.62548 |
import pandas as pd
import boto3
import json
import configparser
config = configparser.ConfigParser()
config.read_file(open('dwh.cfg'))
KEY = config.get('AWS','KEY')
SECRET = config.get('AWS','SECRET')
DWH_CLUSTER_TYPE = config.get("DWH","DWH_CLUSTER_TYPE")
DWH_NUM_NODES = config.get("DWH","DWH_NUM_NODES")
DWH_NODE_TYPE = config.get("DWH","DWH_NODE_TYPE")
DWH_CLUSTER_IDENTIFIER = config.get("DWH","DWH_CLUSTER_IDENTIFIER")
DWH_DB = config.get("DWH","DWH_DB")
DWH_DB_USER = config.get("DWH","DWH_DB_USER")
DWH_DB_PASSWORD = config.get("DWH","DWH_DB_PASSWORD")
DWH_PORT = config.get("DWH","DWH_PORT")
DWH_IAM_ROLE_NAME = config.get("DWH", "DWH_IAM_ROLE_NAME")
(DWH_DB_USER, DWH_DB_PASSWORD, DWH_DB)
pd.DataFrame({"Param":
["DWH_CLUSTER_TYPE", "DWH_NUM_NODES", "DWH_NODE_TYPE", "DWH_CLUSTER_IDENTIFIER", "DWH_DB", "DWH_DB_USER", "DWH_DB_PASSWORD", "DWH_PORT", "DWH_IAM_ROLE_NAME"],
"Value":
[DWH_CLUSTER_TYPE, DWH_NUM_NODES, DWH_NODE_TYPE, DWH_CLUSTER_IDENTIFIER, DWH_DB, DWH_DB_USER, DWH_DB_PASSWORD, DWH_PORT, DWH_IAM_ROLE_NAME]
})
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
s3 = boto3.resource('s3',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
iam = boto3.client('iam',aws_access_key_id=KEY,
aws_secret_access_key=SECRET,
region_name='us-west-2'
)
redshift = boto3.client('redshift',
region_name="us-west-2",
aws_access_key_id=KEY,
aws_secret_access_key=SECRET
)
sampleDbBucket = s3.Bucket("awssampledbuswest2")
for obj in sampleDbBucket.objects.filter(Prefix="ssbgz"):
print(obj)
from botocore.exceptions import ClientError
try:
print("1.1 Creating a new IAM Role")
dwhRole = iam.create_role(
Path='/',
RoleName=DWH_IAM_ROLE_NAME,
Description = "Allows Redshift clusters to call AWS services on your behalf.",
AssumeRolePolicyDocument=json.dumps(
{'Statement': [{'Action': 'sts:AssumeRole',
'Effect': 'Allow',
'Principal': {'Service': 'redshift.amazonaws.com'}}],
'Version': '2012-10-17'})
)
except Exception as e:
print(e)
print("1.2 Attaching Policy")
iam.attach_role_policy(RoleName=DWH_IAM_ROLE_NAME,
PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
)['ResponseMetadata']['HTTPStatusCode']
print("1.3 Get the IAM role ARN")
roleArn = iam.get_role(RoleName=DWH_IAM_ROLE_NAME)['Role']['Arn']
print(roleArn)
redshift.create_cluster(
ClusterType=DWH_CLUSTER_TYPE,
NodeType=DWH_NODE_TYPE,
NumberOfNodes=int(DWH_NUM_NODES),
DBName=DWH_DB,
ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,
MasterUsername=DWH_DB_USER,
MasterUserPassword=DWH_DB_PASSWORD,
IamRoles=[roleArn]
)
except Exception as e:
print(e)
Show = ["ClusterIdentifier", "NodeType", "ClusterStatus", "MasterUsername", "DBName", "Endpoint", "NumberOfNodes", 'VpcId']
x = [(k, v) for k,v in props.items() if k in keysToShow]
return pd.DataFrame(data=x, columns=["Key", "Value"])
myClusterProps = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]
prettyRedshiftProps(myClusterProps)
DWH_ENDPOINT = myClusterProps['Endpoint']['Address']
DWH_ROLE_ARN = myClusterProps['IamRoles'][0]['IamRoleArn']
print("DWH_ENDPOINT :: ", endpoint)
print("DWH_ROLE_ARN :: ", roleArn)
aultSg.authorize_ingress(
GroupName=defaultSg.group_name,
CidrIp='0.0.0.0/0',
IpProtocol='TCP',
FromPort=int(DWH_PORT),
ToPort=int(DWH_PORT)
)
except Exception as e:
print(e)
)
conn_string="postgresql://{}:{}@{}:{}/{}".format(DWH_DB_USER, DWH_DB_PASSWORD, DWH_ENDPOINT, DWH_PORT,DWH_DB)
print(conn_string)
get_ipython().run_line_magic('sql', '$conn_string')
| true | true |
f7285218ec2efc269a29428d46bab2143b47e537 | 1,737 | py | Python | AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_abstract_numbers.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 2,757 | 2018-04-28T21:41:36.000Z | 2022-03-29T06:33:36.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_abstract_numbers.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 20 | 2019-07-23T15:29:32.000Z | 2022-01-21T12:53:04.000Z | AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_abstract_numbers.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 449 | 2018-05-09T05:54:05.000Z | 2022-03-30T14:54:18.000Z | """Unit tests for numbers.py."""
import math
import unittest
from numbers import Complex, Real, Rational, Integral
from test import test_support
class TestNumbers(unittest.TestCase):
    """Checks that the builtin numeric types satisfy the `numbers` ABCs.

    NOTE(review): this is a Python 2 test module -- it exercises `long` and
    the file imports `test.test_support`, neither of which exists on
    Python 3.
    """
    def test_int(self):
        # int is Integral (hence also Complex) and exposes the full
        # real/imag/conjugate/numerator/denominator protocol.
        self.assertTrue(issubclass(int, Integral))
        self.assertTrue(issubclass(int, Complex))
        self.assertEqual(7, int(7).real)
        self.assertEqual(0, int(7).imag)
        self.assertEqual(7, int(7).conjugate())
        self.assertEqual(7, int(7).numerator)
        self.assertEqual(1, int(7).denominator)
    def test_long(self):
        # Same protocol for long (Python 2 only).
        self.assertTrue(issubclass(long, Integral))
        self.assertTrue(issubclass(long, Complex))
        self.assertEqual(7, long(7).real)
        self.assertEqual(0, long(7).imag)
        self.assertEqual(7, long(7).conjugate())
        self.assertEqual(7, long(7).numerator)
        self.assertEqual(1, long(7).denominator)
    def test_float(self):
        # float is Real but not Rational (no numerator/denominator).
        self.assertFalse(issubclass(float, Rational))
        self.assertTrue(issubclass(float, Real))
        self.assertEqual(7.3, float(7.3).real)
        self.assertEqual(0, float(7.3).imag)
        self.assertEqual(7.3, float(7.3).conjugate())
    def test_complex(self):
        # complex is Complex but not Real; trunc/float/int conversions fail.
        self.assertFalse(issubclass(complex, Real))
        self.assertTrue(issubclass(complex, Complex))
        c1, c2 = complex(3, 2), complex(4,1)
        # XXX: This is not ideal, but see the comment in math_trunc().
        self.assertRaises(AttributeError, math.trunc, c1)
        self.assertRaises(TypeError, float, c1)
        self.assertRaises(TypeError, int, c1)
def test_main():
    """Entry point used by Python 2's regrtest harness."""
    test_support.run_unittest(TestNumbers)
if __name__ == "__main__":
    unittest.main()
| 32.773585 | 71 | 0.64076 |
import math
import unittest
from numbers import Complex, Real, Rational, Integral
from test import test_support
class TestNumbers(unittest.TestCase):
def test_int(self):
self.assertTrue(issubclass(int, Integral))
self.assertTrue(issubclass(int, Complex))
self.assertEqual(7, int(7).real)
self.assertEqual(0, int(7).imag)
self.assertEqual(7, int(7).conjugate())
self.assertEqual(7, int(7).numerator)
self.assertEqual(1, int(7).denominator)
def test_long(self):
self.assertTrue(issubclass(long, Integral))
self.assertTrue(issubclass(long, Complex))
self.assertEqual(7, long(7).real)
self.assertEqual(0, long(7).imag)
self.assertEqual(7, long(7).conjugate())
self.assertEqual(7, long(7).numerator)
self.assertEqual(1, long(7).denominator)
def test_float(self):
self.assertFalse(issubclass(float, Rational))
self.assertTrue(issubclass(float, Real))
self.assertEqual(7.3, float(7.3).real)
self.assertEqual(0, float(7.3).imag)
self.assertEqual(7.3, float(7.3).conjugate())
def test_complex(self):
self.assertFalse(issubclass(complex, Real))
self.assertTrue(issubclass(complex, Complex))
c1, c2 = complex(3, 2), complex(4,1)
self.assertRaises(AttributeError, math.trunc, c1)
self.assertRaises(TypeError, float, c1)
self.assertRaises(TypeError, int, c1)
def test_main():
test_support.run_unittest(TestNumbers)
if __name__ == "__main__":
unittest.main()
| true | true |
f7285294d2a931523127fa75c792a0d89962c046 | 26 | py | Python | tiktok.py | yalinsili/dsadasdas | 9431370fae53a1fd0d9de9c4c6964c071c7c24a1 | [
"Apache-2.0"
] | null | null | null | tiktok.py | yalinsili/dsadasdas | 9431370fae53a1fd0d9de9c4c6964c071c7c24a1 | [
"Apache-2.0"
] | null | null | null | tiktok.py | yalinsili/dsadasdas | 9431370fae53a1fd0d9de9c4c6964c071c7c24a1 | [
"Apache-2.0"
] | null | null | null | while 1==1:
import bot | 13 | 14 | 0.615385 | while 1==1:
import bot | true | true |
f72852a942e1a5b7682e55b1b0f064fb0bffdf7a | 2,640 | py | Python | libqtile/widget/windowname.py | dequis/qtile | 560e7230016acf4ca47c7c539bb65479085e2019 | [
"MIT"
] | null | null | null | libqtile/widget/windowname.py | dequis/qtile | 560e7230016acf4ca47c7c539bb65479085e2019 | [
"MIT"
] | null | null | null | libqtile/widget/windowname.py | dequis/qtile | 560e7230016acf4ca47c7c539bb65479085e2019 | [
"MIT"
] | null | null | null | # Copyright (c) 2008, 2010 Aldo Cortesi
# Copyright (c) 2010 matt
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2012 Tim Neumann
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .. import hook, bar
from . import base
class WindowName(base._TextBox):
    """Displays the name of the window that currently has focus"""
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ('show_state', True, 'show window status before window name')
    ]
    def __init__(self, width=bar.STRETCH, **config):
        base._TextBox.__init__(self, width=width, **config)
        self.add_defaults(WindowName.defaults)
    def _configure(self, qtile, bar):
        base._TextBox._configure(self, qtile, bar)
        # Redraw whenever the focused window, its title, or its floating
        # state changes.
        for subscribe in (hook.subscribe.window_name_change,
                          hook.subscribe.focus_change,
                          hook.subscribe.float_change):
            subscribe(self.update)
        # Clear the widget when the group's current window is killed.
        @hook.subscribe.client_killed
        def on_client_killed(window):
            if window == self.bar.screen.group.currentWindow:
                self.text = ""
                self.bar.draw()
    def update(self):
        """Refresh the displayed text from the currently focused window."""
        focused = self.bar.screen.group.currentWindow
        prefix = ''
        if self.show_state and focused is not None:
            # First matching state wins: maximized > minimized > floating.
            for attr, marker in (('maximized', '[] '),
                                 ('minimized', '_ '),
                                 ('floating', 'V ')):
                if getattr(focused, attr):
                    prefix = marker
                    break
        label = focused.name if focused and focused.name else " "
        self.text = "%s%s" % (prefix, label)
        self.bar.draw()
| 40 | 79 | 0.680682 |
from .. import hook, bar
from . import base
class WindowName(base._TextBox):
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('show_state', True, 'show window status before window name')
]
def __init__(self, width=bar.STRETCH, **config):
base._TextBox.__init__(self, width=width, **config)
self.add_defaults(WindowName.defaults)
def _configure(self, qtile, bar):
base._TextBox._configure(self, qtile, bar)
hook.subscribe.window_name_change(self.update)
hook.subscribe.focus_change(self.update)
hook.subscribe.float_change(self.update)
@hook.subscribe.client_killed
def on_client_killed(window):
if window == self.bar.screen.group.currentWindow:
self.text = ""
self.bar.draw()
def update(self):
w = self.bar.screen.group.currentWindow
state = ''
if self.show_state and w is not None:
if w.maximized:
state = '[] '
elif w.minimized:
state = '_ '
elif w.floating:
state = 'V '
self.text = "%s%s" % (state, w.name if w and w.name else " ")
self.bar.draw()
| true | true |
f72853030445b587ffb032857bffca4fd08ac3e8 | 335 | py | Python | invenio_subjects_lcsh/__init__.py | fenekku/invenio-subjects-lcsh | e6070b6c4cbedc3ade7a06d5dcb9ba039a2ed3ae | [
"MIT"
] | null | null | null | invenio_subjects_lcsh/__init__.py | fenekku/invenio-subjects-lcsh | e6070b6c4cbedc3ade7a06d5dcb9ba039a2ed3ae | [
"MIT"
] | null | null | null | invenio_subjects_lcsh/__init__.py | fenekku/invenio-subjects-lcsh | e6070b6c4cbedc3ade7a06d5dcb9ba039a2ed3ae | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Northwestern University.
#
# invenio-subjects-lcsh is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""LCSH subject terms for InvenioRDM."""
from .version import __version__
# Bug fix: `('__version__')` was a plain string, so `from ... import *`
# would iterate it character by character and fail; __all__ must be a
# sequence of names.  The docstring also said "MeSH", copied from the
# sibling package -- this is the LCSH package.
__all__ = ('__version__',)
| 23.928571 | 73 | 0.725373 |
from .version import __version__
__all__ = ('__version__')
| true | true |
f72853cad0f83808389d02847ac165bae9c66050 | 21,066 | py | Python | autokeras/task.py | parthpatwa/autokeras | 2b23d870e91afdd2bc12663ff6e00e9df9ef855c | [
"MIT"
] | 1 | 2019-10-28T13:44:11.000Z | 2019-10-28T13:44:11.000Z | autokeras/task.py | parthpatwa/autokeras | 2b23d870e91afdd2bc12663ff6e00e9df9ef855c | [
"MIT"
] | null | null | null | autokeras/task.py | parthpatwa/autokeras | 2b23d870e91afdd2bc12663ff6e00e9df9ef855c | [
"MIT"
] | null | null | null | import pandas as pd
from autokeras import auto_model
from autokeras.hypermodel import head
from autokeras.hypermodel import node
class SupervisedImagePipeline(auto_model.AutoModel):
    """AutoModel whose input is fixed to a single ImageInput node."""
    def __init__(self, outputs, **kwargs):
        # Pin the input node; subclasses only choose the output head(s).
        super().__init__(inputs=node.ImageInput(),
                         outputs=outputs,
                         **kwargs)
class ImageClassifier(SupervisedImagePipeline):
    """AutoKeras image classification class.
    # Arguments
        num_classes: Int. Defaults to None. If None, it will infer from the data.
        multi_label: Boolean. Defaults to False.
        loss: A Keras loss function. Defaults to use 'binary_crossentropy' or
            'categorical_crossentropy' based on the number of classes.
        metrics: A list of Keras metrics. Defaults to use 'accuracy'.
        name: String. The name of the AutoModel. Defaults to 'image_classifier'.
        max_trials: Int. The maximum number of different Keras Models to try.
            The search may finish before reaching the max_trials. Defaults to 100.
        directory: String. The path to a directory for storing the search outputs.
            Defaults to None, which would create a folder with the name of the
            AutoModel in the current directory.
        objective: String. Name of model metric to minimize
            or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
        seed: Int. Random seed.
    """
    def __init__(self,
                 num_classes=None,
                 multi_label=False,
                 loss=None,
                 metrics=None,
                 name='image_classifier',
                 max_trials=100,
                 directory=None,
                 objective='val_loss',
                 seed=None):
        # Assemble ImageInput (from the base class) -> ClassificationHead;
        # all head-specific options are forwarded to the head itself.
        super().__init__(
            outputs=head.ClassificationHead(num_classes=num_classes,
                                            multi_label=multi_label,
                                            loss=loss,
                                            metrics=metrics),
            max_trials=max_trials,
            directory=directory,
            name=name,
            objective=objective,
            seed=seed)
class ImageRegressor(SupervisedImagePipeline):
    """AutoKeras image regression class.
    # Arguments
        output_dim: Int. The number of output dimensions. Defaults to None.
            If None, it will infer from the data.
        loss: A Keras loss function. Defaults to use 'mean_squared_error'.
        metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.
        name: String. The name of the AutoModel. Defaults to 'image_regressor'.
        max_trials: Int. The maximum number of different Keras Models to try.
            The search may finish before reaching the max_trials. Defaults to 100.
        directory: String. The path to a directory for storing the search outputs.
            Defaults to None, which would create a folder with the name of the
            AutoModel in the current directory.
        objective: String. Name of model metric to minimize
            or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
        seed: Int. Random seed.
    """
    # NOTE: the docstring previously documented a `multi_label` argument
    # that does not exist in this signature (copied from the classifier);
    # it has been removed.
    def __init__(self,
                 output_dim=None,
                 loss=None,
                 metrics=None,
                 name='image_regressor',
                 max_trials=100,
                 directory=None,
                 objective='val_loss',
                 seed=None):
        # Assemble ImageInput (from the base class) -> RegressionHead.
        super().__init__(
            outputs=head.RegressionHead(output_dim=output_dim,
                                        loss=loss,
                                        metrics=metrics),
            max_trials=max_trials,
            directory=directory,
            name=name,
            objective=objective,
            seed=seed)
class SupervisedTextPipeline(auto_model.AutoModel):
    """AutoModel whose input is fixed to a single TextInput node."""
    def __init__(self, outputs, **kwargs):
        # Pin the input node; subclasses only choose the output head(s).
        super().__init__(inputs=node.TextInput(),
                         outputs=outputs,
                         **kwargs)
class TextClassifier(SupervisedTextPipeline):
    """AutoKeras text classification class.
    # Arguments
        num_classes: Int. Defaults to None. If None, it will infer from the data.
        multi_label: Boolean. Defaults to False.
        loss: A Keras loss function. Defaults to use 'binary_crossentropy' or
            'categorical_crossentropy' based on the number of classes.
        metrics: A list of Keras metrics. Defaults to use 'accuracy'.
        name: String. The name of the AutoModel. Defaults to 'text_classifier'.
        max_trials: Int. The maximum number of different Keras Models to try.
            The search may finish before reaching the max_trials. Defaults to 100.
        directory: String. The path to a directory for storing the search outputs.
            Defaults to None, which would create a folder with the name of the
            AutoModel in the current directory.
        objective: String. Name of model metric to minimize
            or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
        seed: Int. Random seed.
    """
    def __init__(self,
                 num_classes=None,
                 multi_label=False,
                 loss=None,
                 metrics=None,
                 name='text_classifier',
                 max_trials=100,
                 directory=None,
                 objective='val_loss',
                 seed=None):
        # Assemble TextInput (from the base class) -> ClassificationHead;
        # all head-specific options are forwarded to the head itself.
        super().__init__(
            outputs=head.ClassificationHead(num_classes=num_classes,
                                            multi_label=multi_label,
                                            loss=loss,
                                            metrics=metrics),
            max_trials=max_trials,
            directory=directory,
            name=name,
            objective=objective,
            seed=seed)
class TextRegressor(SupervisedTextPipeline):
    """AutoKeras text regression class.

    # Arguments
        output_dim: Int. The number of output dimensions. Defaults to None.
            If None, it will infer from the data.
        loss: A Keras loss function. Defaults to use 'mean_squared_error'.
        metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.
        name: String. The name of the AutoModel. Defaults to 'text_regressor'.
        max_trials: Int. The maximum number of different Keras Models to try.
            The search may finish before reaching max_trials. Defaults to 100.
        directory: String. The path to a directory for storing the search
            outputs. Defaults to None, which creates a folder named after the
            AutoModel in the current directory.
        objective: String. Name of model metric to minimize or maximize,
            e.g. 'val_accuracy'. Defaults to 'val_loss'.
        seed: Int. Random seed.
    """

    def __init__(self,
                 output_dim=None,
                 loss=None,
                 metrics=None,
                 name='text_regressor',
                 max_trials=100,
                 directory=None,
                 objective='val_loss',
                 seed=None):
        # Build the regression head; the text input node comes from
        # SupervisedTextPipeline.
        regression_head = head.RegressionHead(output_dim=output_dim,
                                              loss=loss,
                                              metrics=metrics)
        super().__init__(outputs=regression_head,
                         max_trials=max_trials,
                         directory=directory,
                         name=name,
                         objective=objective,
                         seed=seed)
class SupervisedStructuredDataPipeline(auto_model.AutoModel):
    """AutoModel over tabular data that can also read csv files directly.

    Validates the user-supplied column metadata and remembers the label
    column name when fitting from a csv file, so `predict` can drop the
    label column from csv test data.
    """

    def __init__(self, outputs, column_names, column_types, **kwargs):
        inputs = node.StructuredDataInput()
        inputs.column_types = column_types
        inputs.column_names = column_names
        # Every declared column type must be one of the two supported kinds.
        if column_types:
            for column_type in column_types.values():
                if column_type not in ['categorical', 'numerical']:
                    raise ValueError(
                        'Column_types should be either "categorical" '
                        'or "numerical", but got {name}'.format(name=column_type))
        # Every typed column must also appear in the declared column names.
        if column_names and column_types:
            for column_name in column_types:
                if column_name not in column_names:
                    raise ValueError('Column_names and column_types are '
                                     'mismatched. Cannot find column name '
                                     '{name} in the data.'.format(name=column_name))
        super().__init__(inputs=inputs,
                         outputs=outputs,
                         **kwargs)
        # Name of the label column when fitting from a csv file.  Read by
        # predict() to strip the label from csv test data.
        self._target_col_name = None

    def _read_from_csv(self, x, y):
        """Load csv file `x`; split off label column `y` as a numpy array."""
        df = pd.read_csv(x)
        target = df.pop(y).to_numpy()
        return df, target

    def fit(self,
            x=None,
            y=None,
            epochs=None,
            callbacks=None,
            validation_split=0,
            validation_data=None,
            **kwargs):
        """Search for the best model and hyperparameters for the task.

        # Arguments
            x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
                Training data x. If a string, it is the path of the csv file
                of the training data.
            y: String, numpy.ndarray, or tensorflow.Dataset. Training data y.
                If the data is from a csv file, it is the name of the label
                column.
            epochs: Int. The number of epochs to train each model during the
                search. If unspecified, epochs equal to 1000 with early
                stopping (patience 30) would be used.
            callbacks: List of Keras callbacks to apply during training and
                validation.
            validation_split: Float between 0 and 1. Fraction of the training
                data to be used as validation data. Not supported when `x` is
                a dataset.
            validation_data: Data on which to evaluate the loss and any model
                metrics at the end of each epoch. Overrides
                `validation_split`. Must have the same type as the training
                data.
            **kwargs: Any arguments supported by keras.Model.fit.
        """
        # x is the file path of the training data.
        if isinstance(x, str):
            # Bug fix: this used to be stored under the misspelled attribute
            # `_target_column_name`, while predict() reads
            # `_target_col_name`, so the label column was never dropped from
            # csv test data.  Store it under the attribute predict() reads.
            self._target_col_name = y
            x, y = self._read_from_csv(x, y)
        if validation_data:
            x_val, y_val = validation_data
            if isinstance(x_val, str):
                validation_data = self._read_from_csv(x_val, y_val)
        super().fit(x=x,
                    y=y,
                    epochs=epochs,
                    callbacks=callbacks,
                    validation_split=validation_split,
                    validation_data=validation_data,
                    **kwargs)

    def predict(self, x, batch_size=32, **kwargs):
        """Predict the output for a given testing data.

        # Arguments
            x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
                Testing data x. If a string, it is the path of the csv file
                of the testing data.
            batch_size: Int. Defaults to 32.
            **kwargs: Any arguments supported by keras.Model.predict.

        # Returns
            A list of numpy.ndarray objects or a single numpy.ndarray.
            The predicted results.
        """
        if isinstance(x, str):
            x = pd.read_csv(x)
            # Drop the label column if the test csv file still contains it.
            if self._target_col_name in x:
                x.pop(self._target_col_name)
        return super().predict(x=x,
                               batch_size=batch_size,
                               **kwargs)

    def evaluate(self, x, y=None, batch_size=32, **kwargs):
        """Evaluate the best model for the given data.

        # Arguments
            x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
                Testing data x. If a string, it is the path of the csv file
                of the testing data.
            y: String, numpy.ndarray, or tensorflow.Dataset. Testing data y.
                If the data is from a csv file, it is the name of the label
                column.
            batch_size: Int. Defaults to 32.
            **kwargs: Any arguments supported by keras.Model.evaluate.

        # Returns
            Scalar test loss (if the model has a single output and no
            metrics) or list of scalars (if the model has multiple outputs
            and/or metrics). The attribute model.metrics_names will give you
            the display labels for the scalar outputs.
        """
        if isinstance(x, str):
            x, y = self._read_from_csv(x, y)
        return super().evaluate(x=x,
                                y=y,
                                batch_size=batch_size,
                                **kwargs)
class StructuredDataClassifier(SupervisedStructuredDataPipeline):
    """AutoKeras structured data classification class.

    # Arguments
        column_names: A list of strings specifying the names of the columns.
            The length of the list should equal the number of columns of the
            data. Defaults to None. If None, it will be obtained from the
            header of the csv file or the pandas.DataFrame.
        column_types: Dict. The keys are the column names. The values should
            either be 'numerical' or 'categorical', indicating the type of
            that column. Defaults to None. If not None, column_names need to
            be specified. If None, it will be inferred from the data.
        num_classes: Int. Defaults to None. If None, it will infer from the
            data.
        multi_label: Boolean. Defaults to False.
        loss: A Keras loss function. Defaults to use 'binary_crossentropy' or
            'categorical_crossentropy' based on the number of classes.
        metrics: A list of Keras metrics. Defaults to use 'accuracy'.
        name: String. The name of the AutoModel. Defaults to
            'structured_data_classifier'.
        max_trials: Int. The maximum number of different Keras Models to try.
            The search may finish before reaching max_trials. Defaults to 100.
        directory: String. The path to a directory for storing the search
            outputs. Defaults to None, which creates a folder named after the
            AutoModel in the current directory.
        objective: String. Name of model metric to minimize or maximize.
            Defaults to 'val_accuracy'.
        seed: Int. Random seed.
    """

    def __init__(self,
                 column_names=None,
                 column_types=None,
                 num_classes=None,
                 multi_label=False,
                 loss=None,
                 metrics=None,
                 name='structured_data_classifier',
                 max_trials=100,
                 directory=None,
                 objective='val_accuracy',
                 seed=None):
        # Build the classification head; column validation happens in the
        # base pipeline class.
        classification_head = head.ClassificationHead(num_classes=num_classes,
                                                      multi_label=multi_label,
                                                      loss=loss,
                                                      metrics=metrics)
        super().__init__(outputs=classification_head,
                         column_names=column_names,
                         column_types=column_types,
                         max_trials=max_trials,
                         directory=directory,
                         name=name,
                         objective=objective,
                         seed=seed)

    def fit(self,
            x=None,
            y=None,
            epochs=None,
            callbacks=None,
            validation_split=0,
            validation_data=None,
            **kwargs):
        """Search for the best model and hyperparameters for the task.

        # Arguments
            x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
                Training data x. If a string, it is the path of the csv file
                of the training data.
            y: String, numpy.ndarray, or tensorflow.Dataset. Training data y.
                If the data is from a csv file, it is the name of the label
                column.
            epochs: Int. The number of epochs to train each model during the
                search. If unspecified, epochs equal to 1000 with early
                stopping (patience 30) would be used.
            callbacks: List of Keras callbacks to apply during training and
                validation.
            validation_split: Float between 0 and 1. Fraction of the training
                data to be used as validation data. Not supported when `x` is
                a dataset.
            validation_data: Data on which to evaluate the loss and any model
                metrics at the end of each epoch. Overrides
                `validation_split`.
            **kwargs: Any arguments supported by keras.Model.fit.
        """
        # Pure forwarding override: kept so this subclass can document the
        # csv-aware fit() contract.
        super().fit(x=x,
                    y=y,
                    epochs=epochs,
                    callbacks=callbacks,
                    validation_split=validation_split,
                    validation_data=validation_data,
                    **kwargs)
class StructuredDataRegressor(SupervisedStructuredDataPipeline):
    """AutoKeras structured data regression class.

    # Arguments
        column_names: A list of strings specifying the names of the columns.
            The length of the list should equal the number of columns of the
            data. Defaults to None. If None, it will be obtained from the
            header of the csv file or the pandas.DataFrame.
        column_types: Dict. The keys are the column names. The values should
            either be 'numerical' or 'categorical', indicating the type of
            that column. Defaults to None. If not None, column_names need to
            be specified. If None, it will be inferred from the data.
        output_dim: Int. The number of output dimensions. Defaults to None.
            If None, it will infer from the data.
        loss: A Keras loss function. Defaults to use 'mean_squared_error'.
        metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.
        name: String. The name of the AutoModel. Defaults to
            'structured_data_regressor'.
        max_trials: Int. The maximum number of different Keras Models to try.
            The search may finish before reaching max_trials. Defaults to 100.
        directory: String. The path to a directory for storing the search
            outputs. Defaults to None, which creates a folder named after the
            AutoModel in the current directory.
        objective: String. Name of model metric to minimize or maximize,
            e.g. 'val_accuracy'. Defaults to 'val_loss'.
        seed: Int. Random seed.
    """

    def __init__(self,
                 column_names=None,
                 column_types=None,
                 output_dim=None,
                 loss=None,
                 metrics=None,
                 name='structured_data_regressor',
                 max_trials=100,
                 directory=None,
                 objective='val_loss',
                 seed=None):
        # Build the regression head; column validation happens in the base
        # pipeline class.
        regression_head = head.RegressionHead(output_dim=output_dim,
                                              loss=loss,
                                              metrics=metrics)
        super().__init__(outputs=regression_head,
                         column_names=column_names,
                         column_types=column_types,
                         max_trials=max_trials,
                         directory=directory,
                         name=name,
                         objective=objective,
                         seed=seed)
| 44.821277 | 85 | 0.580651 | import pandas as pd
from autokeras import auto_model
from autokeras.hypermodel import head
from autokeras.hypermodel import node
class SupervisedImagePipeline(auto_model.AutoModel):
    """AutoModel whose input node is fixed to images.

    Subclasses only supply the output head.
    """

    def __init__(self, outputs, **kwargs):
        # Pin the input node to images; forward everything else unchanged.
        super().__init__(inputs=node.ImageInput(),
                         outputs=outputs,
                         **kwargs)
class ImageClassifier(SupervisedImagePipeline):
    """AutoKeras image classification class.

    Wires a ClassificationHead onto the image pipeline and forwards the
    search configuration (max_trials, directory, name, objective, seed)
    to the AutoModel base class.
    """

    def __init__(self,
                 num_classes=None,
                 multi_label=False,
                 loss=None,
                 metrics=None,
                 name='image_classifier',
                 max_trials=100,
                 directory=None,
                 objective='val_loss',
                 seed=None):
        # num_classes/multi_label/loss/metrics configure the head only.
        super().__init__(
            outputs=head.ClassificationHead(num_classes=num_classes,
                                            multi_label=multi_label,
                                            loss=loss,
                                            metrics=metrics),
            max_trials=max_trials,
            directory=directory,
            name=name,
            objective=objective,
            seed=seed)
class ImageRegressor(SupervisedImagePipeline):
    """AutoKeras image regression class.

    Wires a RegressionHead onto the image pipeline and forwards the
    search configuration to the AutoModel base class.
    """

    def __init__(self,
                 output_dim=None,
                 loss=None,
                 metrics=None,
                 name='image_regressor',
                 max_trials=100,
                 directory=None,
                 objective='val_loss',
                 seed=None):
        # output_dim/loss/metrics configure the head only.
        super().__init__(
            outputs=head.RegressionHead(output_dim=output_dim,
                                        loss=loss,
                                        metrics=metrics),
            max_trials=max_trials,
            directory=directory,
            name=name,
            objective=objective,
            seed=seed)
class SupervisedTextPipeline(auto_model.AutoModel):
    """AutoModel whose input node is fixed to raw text.

    Subclasses only supply the output head.
    """

    def __init__(self, outputs, **kwargs):
        # Pin the input node to text; forward everything else unchanged.
        super().__init__(inputs=node.TextInput(),
                         outputs=outputs,
                         **kwargs)
class TextClassifier(SupervisedTextPipeline):
    """AutoKeras text classification class.

    Wires a ClassificationHead onto the text pipeline and forwards the
    search configuration to the AutoModel base class.
    """

    def __init__(self,
                 num_classes=None,
                 multi_label=False,
                 loss=None,
                 metrics=None,
                 name='text_classifier',
                 max_trials=100,
                 directory=None,
                 objective='val_loss',
                 seed=None):
        # num_classes/multi_label/loss/metrics configure the head only.
        super().__init__(
            outputs=head.ClassificationHead(num_classes=num_classes,
                                            multi_label=multi_label,
                                            loss=loss,
                                            metrics=metrics),
            max_trials=max_trials,
            directory=directory,
            name=name,
            objective=objective,
            seed=seed)
class TextRegressor(SupervisedTextPipeline):
    """AutoKeras text regression class.

    Wires a RegressionHead onto the text pipeline and forwards the
    search configuration to the AutoModel base class.
    """

    def __init__(self,
                 output_dim=None,
                 loss=None,
                 metrics=None,
                 name='text_regressor',
                 max_trials=100,
                 directory=None,
                 objective='val_loss',
                 seed=None):
        # output_dim/loss/metrics configure the head only.
        super().__init__(
            outputs=head.RegressionHead(output_dim=output_dim,
                                        loss=loss,
                                        metrics=metrics),
            max_trials=max_trials,
            directory=directory,
            name=name,
            objective=objective,
            seed=seed)
class SupervisedStructuredDataPipeline(auto_model.AutoModel):
    """AutoModel over tabular data that can also read csv files directly.

    Validates the user-supplied column metadata and remembers the label
    column name when fitting from a csv file, so `predict` can drop the
    label column from csv test data.
    """

    def __init__(self, outputs, column_names, column_types, **kwargs):
        inputs = node.StructuredDataInput()
        inputs.column_types = column_types
        inputs.column_names = column_names
        # Every declared column type must be one of the two supported kinds.
        if column_types:
            for column_type in column_types.values():
                if column_type not in ['categorical', 'numerical']:
                    raise ValueError(
                        'Column_types should be either "categorical" '
                        'or "numerical", but got {name}'.format(name=column_type))
        # Every typed column must also appear in the declared column names.
        if column_names and column_types:
            for column_name in column_types:
                if column_name not in column_names:
                    raise ValueError('Column_names and column_types are '
                                     'mismatched. Cannot find column name '
                                     '{name} in the data.'.format(name=column_name))
        super().__init__(inputs=inputs,
                         outputs=outputs,
                         **kwargs)
        # Name of the label column when fitting from a csv file.  Read by
        # predict() to strip the label from csv test data.
        self._target_col_name = None

    def _read_from_csv(self, x, y):
        """Load csv file `x`; split off label column `y` as a numpy array."""
        df = pd.read_csv(x)
        target = df.pop(y).to_numpy()
        return df, target

    def fit(self,
            x=None,
            y=None,
            epochs=None,
            callbacks=None,
            validation_split=0,
            validation_data=None,
            **kwargs):
        """Search for the best model; `x`/`y` may name a csv file and label column."""
        if isinstance(x, str):
            # Bug fix: this used to be stored under the misspelled attribute
            # `_target_column_name`, while predict() reads
            # `_target_col_name`, so the label column was never dropped from
            # csv test data.  Store it under the attribute predict() reads.
            self._target_col_name = y
            x, y = self._read_from_csv(x, y)
        if validation_data:
            x_val, y_val = validation_data
            if isinstance(x_val, str):
                validation_data = self._read_from_csv(x_val, y_val)
        super().fit(x=x,
                    y=y,
                    epochs=epochs,
                    callbacks=callbacks,
                    validation_split=validation_split,
                    validation_data=validation_data,
                    **kwargs)

    def predict(self, x, batch_size=32, **kwargs):
        """Predict on testing data; `x` may be the path of a csv file."""
        if isinstance(x, str):
            x = pd.read_csv(x)
            # Drop the label column if the test csv file still contains it.
            if self._target_col_name in x:
                x.pop(self._target_col_name)
        return super().predict(x=x,
                               batch_size=batch_size,
                               **kwargs)

    def evaluate(self, x, y=None, batch_size=32, **kwargs):
        """Evaluate the best model; `x`/`y` may name a csv file and label column."""
        if isinstance(x, str):
            x, y = self._read_from_csv(x, y)
        return super().evaluate(x=x,
                                y=y,
                                batch_size=batch_size,
                                **kwargs)
class StructuredDataClassifier(SupervisedStructuredDataPipeline):
    """AutoKeras structured data classification class.

    Wires a ClassificationHead onto the structured-data pipeline; note the
    default objective here is 'val_accuracy' (unlike the other tasks).
    """

    def __init__(self,
                 column_names=None,
                 column_types=None,
                 num_classes=None,
                 multi_label=False,
                 loss=None,
                 metrics=None,
                 name='structured_data_classifier',
                 max_trials=100,
                 directory=None,
                 objective='val_accuracy',
                 seed=None):
        # column_names/column_types are validated by the pipeline base class.
        super().__init__(
            outputs=head.ClassificationHead(num_classes=num_classes,
                                            multi_label=multi_label,
                                            loss=loss,
                                            metrics=metrics),
            column_names=column_names,
            column_types=column_types,
            max_trials=max_trials,
            directory=directory,
            name=name,
            objective=objective,
            seed=seed)

    def fit(self,
            x=None,
            y=None,
            epochs=None,
            callbacks=None,
            validation_split=0,
            validation_data=None,
            **kwargs):
        """Search for the best model; pure forwarding to the pipeline fit()."""
        super().fit(x=x,
                    y=y,
                    epochs=epochs,
                    callbacks=callbacks,
                    validation_split=validation_split,
                    validation_data=validation_data,
                    **kwargs)
class StructuredDataRegressor(SupervisedStructuredDataPipeline):
    """AutoKeras structured data regression class.

    Wires a RegressionHead onto the structured-data pipeline and forwards
    the search configuration to the AutoModel base class.
    """

    def __init__(self,
                 column_names=None,
                 column_types=None,
                 output_dim=None,
                 loss=None,
                 metrics=None,
                 name='structured_data_regressor',
                 max_trials=100,
                 directory=None,
                 objective='val_loss',
                 seed=None):
        # column_names/column_types are validated by the pipeline base class.
        super().__init__(
            outputs=head.RegressionHead(output_dim=output_dim,
                                        loss=loss,
                                        metrics=metrics),
            column_names=column_names,
            column_types=column_types,
            max_trials=max_trials,
            directory=directory,
            name=name,
            objective=objective,
            seed=seed)
| true | true |
f72853fef919de8ffd86762057a90fb3082a1d2d | 56 | py | Python | nempy/__init__.py | bje-/nempy | 3a3c30d6e7aa203e1f09cdd826e65967ad214a2c | [
"BSD-3-Clause"
] | null | null | null | nempy/__init__.py | bje-/nempy | 3a3c30d6e7aa203e1f09cdd826e65967ad214a2c | [
"BSD-3-Clause"
] | null | null | null | nempy/__init__.py | bje-/nempy | 3a3c30d6e7aa203e1f09cdd826e65967ad214a2c | [
"BSD-3-Clause"
] | null | null | null | from nempy import markets, historical_spot_market_inputs | 56 | 56 | 0.910714 | from nempy import markets, historical_spot_market_inputs | true | true |
f72854cf53d8f8b51f8d99a44f125330be2d13af | 1,069 | py | Python | atest/robot/cli/console/expected_output/ExpectedOutputLibrary.py | phil-davis/robotframework | 4d4ce686cbe01e293bb86ea6ff34330e8c45fc43 | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2020-09-13T08:56:49.000Z | 2021-01-10T11:21:34.000Z | atest/robot/cli/console/expected_output/ExpectedOutputLibrary.py | phil-davis/robotframework | 4d4ce686cbe01e293bb86ea6ff34330e8c45fc43 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-10-15T19:39:58.000Z | 2020-10-15T19:41:03.000Z | atest/robot/cli/console/expected_output/ExpectedOutputLibrary.py | phil-davis/robotframework | 4d4ce686cbe01e293bb86ea6ff34330e8c45fc43 | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2016-02-29T15:42:22.000Z | 2018-05-08T08:58:18.000Z | from os.path import abspath, dirname, join
from fnmatch import fnmatchcase
from operator import eq
from robot.api import logger
CURDIR = dirname(abspath(__file__))
def output_should_be(actual, expected, **replaced):
    """Verify that file `actual` matches the expected-output template.

    `expected` is resolved relative to this library's directory. Template
    lines containing `*` are matched as fnmatch patterns; other lines must
    be equal (trailing whitespace ignored). `replaced` maps placeholder
    strings to substitutions applied to the template before comparing.
    """
    actual_lines = _read_file(actual, 'Actual')
    expected_lines = _read_file(join(CURDIR, expected), 'Expected', replaced)
    if len(expected_lines) != len(actual_lines):
        raise AssertionError('Lengths differ. Expected %d lines but got %d'
                             % (len(expected_lines), len(actual_lines)))
    for exp, act in zip(expected_lines, actual_lines):
        # Use glob matching only when the template line has a wildcard.
        matches = fnmatchcase if '*' in exp else eq
        if not matches(act.rstrip(), exp.rstrip()):
            raise AssertionError('Lines differ.\nExpected: %s\nActual: %s'
                                 % (exp, act))
def _read_file(path, title, replaced=None):
    """Read `path`, apply optional replacements, log the text, return lines."""
    with open(path) as source:
        text = source.read()
    if replaced:
        # Substitute every placeholder before splitting into lines.
        for placeholder in replaced:
            text = text.replace(placeholder, replaced[placeholder])
    logger.debug('%s:\n%s' % (title, text))
    return text.splitlines()
| 33.40625 | 76 | 0.627689 | from os.path import abspath, dirname, join
from fnmatch import fnmatchcase
from operator import eq
from robot.api import logger
CURDIR = dirname(abspath(__file__))
def output_should_be(actual, expected, **replaced):
    """Verify that file `actual` matches the expected-output template `expected`.

    Template lines containing `*` are matched with fnmatchcase; others must
    compare equal (trailing whitespace ignored). `replaced` maps placeholder
    strings to substitutions applied to the template before comparison.
    """
    actual = _read_file(actual, 'Actual')
    expected = _read_file(join(CURDIR, expected), 'Expected', replaced)
    if len(expected) != len(actual):
        raise AssertionError('Lengths differ. Expected %d lines but got %d'
                             % (len(expected), len(actual)))
    for exp, act in zip(expected, actual):
        # Glob matching only when the template line has a wildcard.
        tester = fnmatchcase if '*' in exp else eq
        if not tester(act.rstrip(), exp.rstrip()):
            raise AssertionError('Lines differ.\nExpected: %s\nActual: %s'
                                 % (exp, act))
def _read_file(path, title, replaced=None):
    """Read `path`, apply optional replacements, log the text, return its lines."""
    with open(path) as file:
        content = file.read()
    if replaced:
        # Substitute every placeholder before splitting into lines.
        for item in replaced:
            content = content.replace(item, replaced[item])
    logger.debug('%s:\n%s' % (title, content))
    return content.splitlines()
| true | true |
f72858863e51544c1e5bb7067e1e7750dccf22c1 | 18,799 | py | Python | tensorflow/python/debug/wrappers/framework.py | bhbai/tensorflow | d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3 | [
"Apache-2.0"
] | 13 | 2017-01-17T07:48:00.000Z | 2022-03-09T09:43:59.000Z | tensorflow/python/debug/wrappers/framework.py | bhbai/tensorflow | d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3 | [
"Apache-2.0"
] | 1 | 2017-02-06T08:12:22.000Z | 2017-02-06T08:12:22.000Z | tensorflow/python/debug/wrappers/framework.py | bhbai/tensorflow | d4b5c606fc9fbd1a20b5b113b4bc831f31d889a3 | [
"Apache-2.0"
] | 3 | 2017-05-12T03:45:56.000Z | 2018-02-26T04:33:28.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug wrapper sessions.
A debug wrapper session is a wrapper around a TensorFlow Python Session.
The wrapper preserves the Session interface, most importantly the run() method,
while providing abilities to:
a) Intercept a run() call to a wrapped session and insert debug tensor watches
according to externally-specified debug URLs.
b) Release control to an external (i.e., non-Session) object before and after
the run() call, so that the external object can perform actions such as
launching a UI to let users inspect the intermediate tensors and partition
graphs from the run() call.
c) (To be implemented) Intercept a run() call and give control to DebugStepper
to let it perform stepping / continuing-to actions on the graph.
b) (To be implemented in a future CL) Enter an instruction loop to let an
external object (e.g., remote client) launch run() and cont() calls
remotely.
*** The lifetime of a debug wrapper session: ***
1) The wrapper session is created by calling the constructor with a
wrapped (normal) session as the argument:
wrapper = FooDebugWrapperSession(sess)
wherein FooDebugWrapperSession is a concrete subclass implementing the
abstract BaseDebugWrapperSession class below.
2) Near the end of the constructor call, the on_session_init() callback is
invoked, with a OnSessionInitRequest object as the argument. The object
carries the wrapped (normal) session object.
3) The callback handles the request and returns a OnSessionInitResponse
object with an action field, directing the wrapper session what to do next.
If the action field in the OnSessionInitResponse is PROCEED, the constuctor
returns. Control is released back to the caller of the constructor, which can
invoke run() method of wrapper session with the same syntax as a non-wrapped
session, e.g.,:
wrapper.run(fetches, feed_dict=feeds, options=run_options)
Below, A1 - A2 is the lifetime of a wrapper run() call if the action is
PROCEED:
A1) Right at the start of each run() call, the on_run_start() callback is
invoked, with an OnRunStartRequest object carrying information such as
the fetches, the feed dict, the run options and run metadata used in
this run call, along with a count of how many run calls has occurred
on this wrapper session. The callback then returns an OnRunStartResponse
object, of which the action field directs what the wrapper session
actually will do of the run() call.
If the action is DEBUG_RUN, a debugged (tensor-watched) run will ensue,
with the debug URLs supplied in the debug_urls field of the response.
These can be file:// or grpc:// URLs, for example.
If the action is NON_DEBUG_RUN, a non-debug (normal) run will ensue.
If the action is INVOKE_STEPPER, no run() call will be issued to the
wrapped session. But instead, a DebugStepper (i.e., "continuation
debugger") will be used to perform stepping / continue-to actions on
the graph.
TODO(cais): The event loop for the DebugStepper will request additional
callbacks including on_cont_start() and on_cont_end(). Add those.
A2) Right before the run() returns, the on_run_end() callback is invoked,
with an OnRunEndRequest object as the argument, which carries information
including the actual action performed in the warpper run() call and the
run_metadata from the run() call.
However, if the action field in OnSessionInitResponse is
REMOTE_INSTR_LOOP, the constructor will automatically invoke an instruction loop
that gives the control to a remote caller.
In the remote instruction loop, the following steps will happen:
B1) Callback on_instr_start() is invoked. The callback will return an
OnInstrStartResponse object with an action field which can order one of
the following actions:
i) a run() call with fetches, feeds and debug_urls specified.
ii) a DebugStepper cont() call with target specified.
iii) value overrides in the cached tensors from the DebugStepper.
iv) exit the instruction loop.
B2) The wrapper session carries out the action specified above.
B3) If still in the instruction loop, the wrapper session invokes the
on_instr_end() callback. After the on_instr_end() callback returns, jump
back to B1.
TODO(cais): Implemented the instruction loop in B1 - B3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug import debug_utils
from tensorflow.python.debug import stepper
from tensorflow.python.framework import errors
# Helper function.
def _check_type(obj, expected_type):
"""Check if an object is of the expected type.
Args:
obj: The object being checked.
expected_type: (type) The expected type of obj.
Raises:
TypeError: If obj is not an instance of expected_type.
"""
if not isinstance(obj, expected_type):
raise TypeError("Expected type %s; got type %s" %
(expected_type, type(obj)))
class OnSessionInitRequest(object):
  """Request to an on-session-init callback.

  This callback is invoked during the __init__ call to a debug-wrapper
  session.
  """

  def __init__(self, sess):
    """Constructor.

    Args:
      sess: A tensorflow Session object.

    Raises:
      TypeError: If sess is not a session.BaseSession (via _check_type).
    """
    # Fail fast on anything that is not an actual Session instance.
    _check_type(sess, session.BaseSession)
    self.session = sess
class OnSessionInitAction(object):
  """Enum-like values for possible action to take on session init."""

  # Proceed, without special actions, in the wrapper session initialization.
  # What action the wrapper session performs next is determined by the caller
  # of the wrapper session. E.g., it can call run().
  PROCEED = "proceed"

  # Instead of letting the caller of the wrapper session determine what actions
  # the wrapper session will perform next, enter a loop to receive instructions
  # from a remote client.
  # For example, TensorBoard visual debugger can use this action so that it can
  # launch session.run() calls remotely.
  # NOTE: BaseDebugWrapperSession.__init__ currently raises
  # NotImplementedError when this action is returned.
  REMOTE_INSTR_LOOP = "remote_instr_loop"
class OnSessionInitResponse(object):
  """Response from an on-session-init callback."""

  def __init__(self, action):
    """Constructor.

    Args:
      action: (`OnSessionInitAction`) Debugger action to take on session init.

    Raises:
      TypeError: If action is not a str (via _check_type).
    """
    # Actions are plain strings (see OnSessionInitAction), hence the str check.
    _check_type(action, str)
    self.action = action
class OnRunStartRequest(object):
  """Request to an on-run-start callback.

  This callback is invoked during a run() call of the debug-wrapper
  session, immediately after the run() call counter is incremented.
  It carries everything the wrapper knows about the impending run call.
  """

  def __init__(self, fetches, feed_dict, run_options, run_metadata,
               run_call_count):
    """Constructor of `OnRunStartRequest`.

    Args:
      fetches: Fetch targets of the run() call.
      feed_dict: The feed dictionary to the run() call.
      run_options: RunOptions input to the run() call.
      run_metadata: RunMetadata input to the run() call.
      run_call_count: 1-based count of how many run calls (including this
        one) have been invoked on the wrapper session.
    """
    # Plain data holder: the fields are stored without any validation.
    self.run_call_count = run_call_count
    self.run_options = run_options
    self.run_metadata = run_metadata
    self.feed_dict = feed_dict
    self.fetches = fetches
class OnRunStartAction(object):
  """Enum-like values for possible action to take on start of a run() call.

  Returned (inside an OnRunStartResponse) by the on_run_start callback to
  tell the wrapper session how to carry out the run() call.
  """

  # Perform the run with debug tensor-watching enabled.
  DEBUG_RUN = "debug_run"

  # Perform a plain run with no debug tensor-watching.
  NON_DEBUG_RUN = "non_debug_run"

  # Hand the fetches to the debug stepper instead of running them as a
  # whole. TODO(cais): Remove "to-be-implemented".
  INVOKE_STEPPER = "invoke_stepper"
class OnRunStartResponse(object):
  """Request from an on-run-start callback.

  The caller of the callback can use this response object to specify what
  action the debug-wrapper session actually takes on the run() call.
  """

  def __init__(self, action, debug_urls):
    """Constructor of `OnRunStartResponse`.

    Args:
      action: (`OnRunStartAction`) the action actually taken by the wrapped
        session for the run() call.
      debug_urls: (list of str) debug_urls used in watching the tensors
        during the run() call.

    Raises:
      TypeError: If action is not a str or debug_urls is not a list
        (via _check_type).
    """
    _check_type(action, str)
    self.action = action

    # debug_urls must be a list even when the action is a non-debug run.
    _check_type(debug_urls, list)
    self.debug_urls = debug_urls
class OnRunEndRequest(object):
  """Request to an on-run-end callback.

  The callback is invoked immediately before the wrapped run() call ends.
  """

  def __init__(self,
               performed_action,
               run_metadata=None,
               client_graph_def=None,
               tf_error=None):
    """Constructor for `OnRunEndRequest`.

    Args:
      performed_action: (`OnRunStartAction`) Actually-performed action by the
        debug-wrapper session.
      run_metadata: run_metadata output from the run() call (if any).
      client_graph_def: (GraphDef) GraphDef from the client side, i.e., from
        the python front end of TensorFlow. Can be obtained with
        session.graph.as_graph_def().
      tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred
        during the run (if any).

    Raises:
      TypeError: If performed_action is not a str, or run_metadata is given
        but is not a RunMetadata proto (via _check_type).
    """
    _check_type(performed_action, str)
    self.performed_action = performed_action

    # run_metadata is optional; only validate it when supplied.
    if run_metadata is not None:
      _check_type(run_metadata, config_pb2.RunMetadata)
    self.run_metadata = run_metadata
    self.client_graph_def = client_graph_def
    self.tf_error = tf_error
class OnRunEndResponse(object):
  """Response from an on-run-end callback.

  Carries no data yet; it exists so the on_run_end callback has a typed
  return value that can grow fields later.
  """

  def __init__(self):
    # Currently only a placeholder.
    pass
class BaseDebugWrapperSession(session.SessionInterface):
"""Base class of debug-wrapper session classes.
Concrete classes that inherit from this class need to implement the abstract
methods such as on_session_init, on_run_start and on_run_end.
"""
# TODO(cais): Add on_cont_start and on_cont_end callbacks once the stepper is
# is available.
def __init__(self, sess):
  """Constructor of `BaseDebugWrapperSession`.

  Args:
    sess: An (unwrapped) TensorFlow session instance.

  Raises:
    ValueError: On invalid `OnSessionInitAction` value.
    NotImplementedError: If the on_session_init callback requests the
      REMOTE_INSTR_LOOP action, which is not implemented yet.
    TypeError: If sess is not a session.BaseSession, or the callback does
      not return an OnSessionInitResponse (via _check_type).
  """
  _check_type(sess, session.BaseSession)

  # The session being wrapped.
  self._sess = sess

  # Keeps track of number of run calls that have been performed on this
  # debug-wrapper session. Incremented at the start of each run().
  self._run_call_count = 0

  # Invoke on-session-init callback; the subclass decides what happens next.
  response = self.on_session_init(OnSessionInitRequest(self._sess))
  _check_type(response, OnSessionInitResponse)

  if response.action == OnSessionInitAction.PROCEED:
    # Control returns to the caller, which can now invoke run().
    pass
  elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:
    # TODO(cais): Implement REMOTE_INSTR_LOOP
    raise NotImplementedError(
        "OnSessionInitAction REMOTE_INSTR_LOOP has not been "
        "implemented.")
  else:
    raise ValueError(
        "Invalid OnSessionInitAction value: %s" % response.action)
@property
def graph(self):
  """Graph of the wrapped session (pass-through accessor)."""
  return self._sess.graph

@property
def sess_str(self):
  """sess_str of the wrapped session (pass-through accessor)."""
  return self._sess.sess_str

@property
def session(self):
  """The wrapped (non-debug) session object."""
  return self._sess
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Wrapper around Session.run() that inserts tensor watch options.
Args:
fetches: Same as the `fetches` arg to regular `Session.run()`.
feed_dict: Same as the `feed_dict` arg to regular `Session.run()`.
options: Same as the `options` arg to regular `Session.run()`.
run_metadata: Same as the `run_metadata` arg to regular `Session.run()`.
Returns:
Simply forwards the output of the wrapped `Session.run()` call.
Raises:
ValueError: On invalid `OnRunStartAction` value.
"""
self._run_call_count += 1
# Invoke on-run-start callback and obtain response.
run_start_resp = self.on_run_start(
OnRunStartRequest(fetches, feed_dict, options, run_metadata,
self._run_call_count))
_check_type(run_start_resp, OnRunStartResponse)
if run_start_resp.action == OnRunStartAction.DEBUG_RUN:
# Decorate RunOption to fill in debugger tensor watch specifications.
decorated_run_options = options or config_pb2.RunOptions()
run_metadata = run_metadata or config_pb2.RunMetadata()
self._decorate_run_options(decorated_run_options,
run_start_resp.debug_urls)
# Invoke the run() method of the wrapped Session. Catch any TensorFlow
# runtime errors.
tf_error = None
try:
retvals = self._sess.run(fetches,
feed_dict=feed_dict,
options=decorated_run_options,
run_metadata=run_metadata)
except errors.OpError as op_error:
tf_error = op_error
retvals = op_error
run_end_req = OnRunEndRequest(
run_start_resp.action,
run_metadata=run_metadata,
client_graph_def=self._sess.graph.as_graph_def(),
tf_error=tf_error)
elif (run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN or
run_start_resp.action == OnRunStartAction.INVOKE_STEPPER):
if run_start_resp.action == OnRunStartAction.INVOKE_STEPPER:
retvals = self.invoke_node_stepper(
stepper.NodeStepper(self._sess, fetches, feed_dict),
restore_variable_values_on_exit=True)
# Invoke run() method of the wrapped session.
retvals = self._sess.run(
fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
# Prepare arg for the on-run-end callback.
run_end_req = OnRunEndRequest(run_start_resp.action)
else:
raise ValueError(
"Invalid OnRunStartAction value: %s" % run_start_resp.action)
# Invoke on-run-end callback and obtain response.
run_end_resp = self.on_run_end(run_end_req)
_check_type(run_end_resp, OnRunEndResponse)
# Currently run_end_resp is only a placeholder. No action is taken on it.
return retvals
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError(
"partial_run_setup is not implemented for debug-wrapper sessions.")
def partial_run(self, handle, fetches, feed_dict=None):
raise NotImplementedError(
"partial_run is not implemented for debug-wrapper sessions.")
def _decorate_run_options(self, run_options, debug_urls):
"""Modify a RunOptions object for debug tensor watching.
Specifies request for outputting partition graphs. Adds
debug_tensor_watch_opts with proper debug URLs.
Args:
run_options: (RunOptions) the modified RunOptions object.
debug_urls: (list of str) debug URLs to be entered in run_options.
debug_tensor_watch_opts.
"""
run_options.output_partition_graphs = True
debug_utils.watch_graph(
run_options, self._sess.graph, debug_urls=debug_urls)
@abc.abstractmethod
def on_session_init(self, request):
"""Callback invoked during construction of the debug-wrapper session.
This is a blocking callback.
The invocation happens right before the constructor ends.
Args:
request: (`OnSessionInitRequest`) callback request carrying information
such as the session being wrapped.
Returns:
An instance of `OnSessionInitResponse`.
"""
@abc.abstractmethod
def on_run_start(self, request):
"""Callback invoked on run() calls to the debug-wrapper session.
This is a blocking callback.
The invocation happens after the wrapper's run() call is entered,
after an increment of run call counter.
Args:
request: (`OnRunStartRequest`) callback request object carrying
information about the run call such as the fetches, feed dict, run
options, run metadata, and how many `run()` calls to this wrapper
session have occurred.
Returns:
An instance of `OnRunStartResponse`, carrying information to
1) direct the wrapper session to perform a specified action (e.g., run
with or without debug tensor watching, invoking the stepper.)
2) debug URLs used to watch the tensors.
"""
@abc.abstractmethod
def on_run_end(self, request):
"""Callback invoked on run() calls to the debug-wrapper session.
This is a blocking callback.
The invocation happens right before the wrapper exits its run() call.
Args:
request: (`OnRunEndRequest`) callback request object carrying information
such as the actual action performed by the session wrapper for the
run() call.
Returns:
An instance of `OnRunStartResponse`.
"""
def __enter__(self):
return self._sess.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
self._sess.__exit__(exec_type, exec_value, exec_tb)
def close(self):
self._sess.close()
# TODO(cais): Add _node_name_regex_whitelist and
# _node_op_type_regex_whitelist.
@abc.abstractmethod
def invoke_node_stepper(self,
node_stepper,
restore_variable_values_on_exit=True):
"""Callback invoked when the client intends to step through graph nodes.
Args:
node_stepper: (stepper.NodeStepper) An instance of NodeStepper to be used
in this stepping session.
restore_variable_values_on_exit: (bool) Whether any variables whose values
have been altered during this node-stepper invocation should be restored
to their old values when this invocation ends.
Returns:
The same return values as the `Session.run()` call on the same fetches as
the NodeStepper.
"""
| 35.20412 | 80 | 0.712432 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug import debug_utils
from tensorflow.python.debug import stepper
from tensorflow.python.framework import errors
def _check_type(obj, expected_type):
if not isinstance(obj, expected_type):
raise TypeError("Expected type %s; got type %s" %
(expected_type, type(obj)))
class OnSessionInitRequest(object):
def __init__(self, sess):
_check_type(sess, session.BaseSession)
self.session = sess
class OnSessionInitAction(object):
PROCEED = "proceed"
REMOTE_INSTR_LOOP = "remote_instr_loop"
class OnSessionInitResponse(object):
def __init__(self, action):
_check_type(action, str)
self.action = action
class OnRunStartRequest(object):
def __init__(self, fetches, feed_dict, run_options, run_metadata,
run_call_count):
self.fetches = fetches
self.feed_dict = feed_dict
self.run_options = run_options
self.run_metadata = run_metadata
self.run_call_count = run_call_count
class OnRunStartAction(object):
DEBUG_RUN = "debug_run"
NON_DEBUG_RUN = "non_debug_run"
INVOKE_STEPPER = "invoke_stepper"
class OnRunStartResponse(object):
def __init__(self, action, debug_urls):
_check_type(action, str)
self.action = action
_check_type(debug_urls, list)
self.debug_urls = debug_urls
class OnRunEndRequest(object):
def __init__(self,
performed_action,
run_metadata=None,
client_graph_def=None,
tf_error=None):
_check_type(performed_action, str)
self.performed_action = performed_action
if run_metadata is not None:
_check_type(run_metadata, config_pb2.RunMetadata)
self.run_metadata = run_metadata
self.client_graph_def = client_graph_def
self.tf_error = tf_error
class OnRunEndResponse(object):
def __init__(self):
pass
class BaseDebugWrapperSession(session.SessionInterface):
def __init__(self, sess):
_check_type(sess, session.BaseSession)
self._sess = sess
self._run_call_count = 0
response = self.on_session_init(OnSessionInitRequest(self._sess))
_check_type(response, OnSessionInitResponse)
if response.action == OnSessionInitAction.PROCEED:
pass
elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:
raise NotImplementedError(
"OnSessionInitAction REMOTE_INSTR_LOOP has not been "
"implemented.")
else:
raise ValueError(
"Invalid OnSessionInitAction value: %s" % response.action)
@property
def graph(self):
return self._sess.graph
@property
def sess_str(self):
return self._sess.sess_str
@property
def session(self):
return self._sess
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
self._run_call_count += 1
run_start_resp = self.on_run_start(
OnRunStartRequest(fetches, feed_dict, options, run_metadata,
self._run_call_count))
_check_type(run_start_resp, OnRunStartResponse)
if run_start_resp.action == OnRunStartAction.DEBUG_RUN:
decorated_run_options = options or config_pb2.RunOptions()
run_metadata = run_metadata or config_pb2.RunMetadata()
self._decorate_run_options(decorated_run_options,
run_start_resp.debug_urls)
tf_error = None
try:
retvals = self._sess.run(fetches,
feed_dict=feed_dict,
options=decorated_run_options,
run_metadata=run_metadata)
except errors.OpError as op_error:
tf_error = op_error
retvals = op_error
run_end_req = OnRunEndRequest(
run_start_resp.action,
run_metadata=run_metadata,
client_graph_def=self._sess.graph.as_graph_def(),
tf_error=tf_error)
elif (run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN or
run_start_resp.action == OnRunStartAction.INVOKE_STEPPER):
if run_start_resp.action == OnRunStartAction.INVOKE_STEPPER:
retvals = self.invoke_node_stepper(
stepper.NodeStepper(self._sess, fetches, feed_dict),
restore_variable_values_on_exit=True)
retvals = self._sess.run(
fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
run_end_req = OnRunEndRequest(run_start_resp.action)
else:
raise ValueError(
"Invalid OnRunStartAction value: %s" % run_start_resp.action)
run_end_resp = self.on_run_end(run_end_req)
_check_type(run_end_resp, OnRunEndResponse)
return retvals
def partial_run_setup(self, fetches, feeds=None):
raise NotImplementedError(
"partial_run_setup is not implemented for debug-wrapper sessions.")
def partial_run(self, handle, fetches, feed_dict=None):
raise NotImplementedError(
"partial_run is not implemented for debug-wrapper sessions.")
def _decorate_run_options(self, run_options, debug_urls):
run_options.output_partition_graphs = True
debug_utils.watch_graph(
run_options, self._sess.graph, debug_urls=debug_urls)
@abc.abstractmethod
def on_session_init(self, request):
@abc.abstractmethod
def on_run_start(self, request):
@abc.abstractmethod
def on_run_end(self, request):
def __enter__(self):
return self._sess.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
self._sess.__exit__(exec_type, exec_value, exec_tb)
def close(self):
self._sess.close()
@abc.abstractmethod
def invoke_node_stepper(self,
node_stepper,
restore_variable_values_on_exit=True):
| true | true |
f72858c49321fde71f3ef219f0128d6912bc3126 | 129 | py | Python | yapup/pandas/__init__.py | klaukh/yapup | 34f62834b19d88ee610aea6c02ec346b2080d04d | [
"MIT"
] | null | null | null | yapup/pandas/__init__.py | klaukh/yapup | 34f62834b19d88ee610aea6c02ec346b2080d04d | [
"MIT"
] | null | null | null | yapup/pandas/__init__.py | klaukh/yapup | 34f62834b19d88ee610aea6c02ec346b2080d04d | [
"MIT"
] | null | null | null | # Import from individual files
from .dataframe import *
# Remove dunders
__all__ = [f for f in dir() if not f.startswith("_")]
| 18.428571 | 53 | 0.705426 |
from .dataframe import *
__all__ = [f for f in dir() if not f.startswith("_")]
| true | true |
f7285a490579fe0f79fba70d510af3f92a71a705 | 303 | py | Python | data/multilingual/Latn.SMO/Serif_8/pdf_to_json_test_Latn.SMO_Serif_8.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.SMO/Serif_8/pdf_to_json_test_Latn.SMO_Serif_8.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Latn.SMO/Serif_8/pdf_to_json_test_Latn.SMO_Serif_8.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.SMO/Serif_8/udhr_Latn.SMO_Serif_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.3 | 73 | 0.811881 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.SMO/Serif_8/udhr_Latn.SMO_Serif_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| true | true |
f7285aaeae66b1000e66ccd2a5c5ab2b62c03c0a | 118 | py | Python | accounts/views/account_views.py | Onlynfk/Freshdesk-CRM-Platform | 67137af09f7daf6fa2d19a9e70d573548137c9db | [
"MIT"
] | null | null | null | accounts/views/account_views.py | Onlynfk/Freshdesk-CRM-Platform | 67137af09f7daf6fa2d19a9e70d573548137c9db | [
"MIT"
] | null | null | null | accounts/views/account_views.py | Onlynfk/Freshdesk-CRM-Platform | 67137af09f7daf6fa2d19a9e70d573548137c9db | [
"MIT"
] | null | null | null | from django.shortcuts import render
def accounts(request):
return render(request, 'accounts/account.html')
| 19.666667 | 52 | 0.737288 | from django.shortcuts import render
def accounts(request):
return render(request, 'accounts/account.html')
| true | true |
f7285bf51531130a3c1e178a459430b0ee8380fb | 138 | py | Python | src/trafficSimulator/point.py | Naor-Yekutiely/trafficSimulator | 54e8b9a66c7bcaff24fd119a0e693187591db8a1 | [
"MIT"
] | 1 | 2021-12-06T17:17:06.000Z | 2021-12-06T17:17:06.000Z | src/trafficSimulator/point.py | Naor-Yekutiely/trafficSimulator | 54e8b9a66c7bcaff24fd119a0e693187591db8a1 | [
"MIT"
] | null | null | null | src/trafficSimulator/point.py | Naor-Yekutiely/trafficSimulator | 54e8b9a66c7bcaff24fd119a0e693187591db8a1 | [
"MIT"
] | null | null | null | class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def getPoint(self):
return (self.x, self.y)
| 17.25 | 31 | 0.507246 | class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def getPoint(self):
return (self.x, self.y)
| true | true |
f7285c026a4050700d4433328f0f70fd7238a0ed | 2,124 | py | Python | AlexaSongBot/modules/song.py | amegh-amz/AlexaSongBot | 593b288b5c2ac10c499597c0fb3234aae3a404db | [
"MIT"
] | 2 | 2021-03-22T09:33:03.000Z | 2021-03-23T13:30:20.000Z | AlexaSongBot/modules/song.py | amegh-amz/AlexaSongBot | 593b288b5c2ac10c499597c0fb3234aae3a404db | [
"MIT"
] | null | null | null | AlexaSongBot/modules/song.py | amegh-amz/AlexaSongBot | 593b288b5c2ac10c499597c0fb3234aae3a404db | [
"MIT"
] | 6 | 2021-03-22T11:56:43.000Z | 2022-02-18T15:46:20.000Z | from pyrogram import Client, filters
import asyncio
import os
from pytube import YouTube
from pyrogram.types import InlineKeyboardMarkup
from pyrogram.types import InlineKeyboardButton
from youtubesearchpython import VideosSearch
from AlexaSongBot.mrdarkprince import ignore_blacklisted_users, get_arg
from AlexaSongBot import app, LOGGER
from AlexaSongBot.sql.chat_sql import add_chat_to_db
def yt_search(song):
videosSearch = VideosSearch(song, limit=1)
result = videosSearch.result()
if not result:
return False
else:
video_id = result["result"][0]["id"]
url = f"https://youtu.be/{video_id}"
return url
@app.on_message(filters.create(ignore_blacklisted_users) & filters.command("mt"))
async def song(client, message):
chat_id = message.chat.id
user_id = message.from_user["id"]
add_chat_to_db(str(chat_id))
args = get_arg(message) + " " + "song"
if args.startswith(" "):
await message.reply("Enter a song name. Check /help")
return ""
status = await message.reply("`🔎I\'m Uploading Your Music.. 📺 Please wait some time ⏳️`` ")
video_link = yt_search(args)
if not video_link:
await status.edit("😔Song not found.")
return ""
yt = YouTube(video_link)
audio = yt.streams.filter(only_audio=True).first()
try:
download = audio.download(filename=f"{str(user_id)}")
except Exception as ex:
await status.edit("Failed to download song")
LOGGER.error(ex)
return ""
rename = os.rename(download, f"{str(user_id)}.mp3")
await app.send_chat_action(message.chat.id, "upload_audio")
title = str(yt.title)
aswin = f"✣ **Music** : [{title[:40]}]({video_link})\n✣ **Uploaded** : [MT Music\'s](https://t.me/mt_music_24)"
await app.send_audio(
caption = aswin,
chat_id=message.chat.id,
audio=f"{str(user_id)}.mp3",
duration=int(yt.length),
title=str(yt.title),
performer=str('[MT Music\'s]'),
reply_to_message_id=message.message_id,
)
await status.delete()
os.remove(f"{str(user_id)}.mp3")
| 34.819672 | 115 | 0.665254 | from pyrogram import Client, filters
import asyncio
import os
from pytube import YouTube
from pyrogram.types import InlineKeyboardMarkup
from pyrogram.types import InlineKeyboardButton
from youtubesearchpython import VideosSearch
from AlexaSongBot.mrdarkprince import ignore_blacklisted_users, get_arg
from AlexaSongBot import app, LOGGER
from AlexaSongBot.sql.chat_sql import add_chat_to_db
def yt_search(song):
videosSearch = VideosSearch(song, limit=1)
result = videosSearch.result()
if not result:
return False
else:
video_id = result["result"][0]["id"]
url = f"https://youtu.be/{video_id}"
return url
@app.on_message(filters.create(ignore_blacklisted_users) & filters.command("mt"))
async def song(client, message):
chat_id = message.chat.id
user_id = message.from_user["id"]
add_chat_to_db(str(chat_id))
args = get_arg(message) + " " + "song"
if args.startswith(" "):
await message.reply("Enter a song name. Check /help")
return ""
status = await message.reply("`🔎I\'m Uploading Your Music.. 📺 Please wait some time ⏳️`` ")
video_link = yt_search(args)
if not video_link:
await status.edit("😔Song not found.")
return ""
yt = YouTube(video_link)
audio = yt.streams.filter(only_audio=True).first()
try:
download = audio.download(filename=f"{str(user_id)}")
except Exception as ex:
await status.edit("Failed to download song")
LOGGER.error(ex)
return ""
rename = os.rename(download, f"{str(user_id)}.mp3")
await app.send_chat_action(message.chat.id, "upload_audio")
title = str(yt.title)
aswin = f"✣ **Music** : [{title[:40]}]({video_link})\n✣ **Uploaded** : [MT Music\'s](https://t.me/mt_music_24)"
await app.send_audio(
caption = aswin,
chat_id=message.chat.id,
audio=f"{str(user_id)}.mp3",
duration=int(yt.length),
title=str(yt.title),
performer=str('[MT Music\'s]'),
reply_to_message_id=message.message_id,
)
await status.delete()
os.remove(f"{str(user_id)}.mp3")
| true | true |
f7285c8cc5a851eaea8d9ca6598fc0794283e7ff | 4,627 | py | Python | homeassistant/components/gios/air_quality.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 5 | 2020-09-17T10:48:51.000Z | 2021-11-22T00:08:17.000Z | homeassistant/components/gios/air_quality.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | homeassistant/components/gios/air_quality.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 6 | 2019-12-01T19:06:52.000Z | 2020-09-17T00:57:06.000Z | """Support for the GIOS service."""
from homeassistant.components.air_quality import (
ATTR_CO,
ATTR_NO2,
ATTR_OZONE,
ATTR_PM_2_5,
ATTR_PM_10,
ATTR_SO2,
AirQualityEntity,
)
from homeassistant.const import CONF_NAME
from .const import ATTR_STATION, DATA_CLIENT, DEFAULT_SCAN_INTERVAL, DOMAIN, ICONS_MAP
ATTRIBUTION = "Data provided by GIOŚ"
SCAN_INTERVAL = DEFAULT_SCAN_INTERVAL
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add a GIOS entities from a config_entry."""
name = config_entry.data[CONF_NAME]
data = hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id]
async_add_entities([GiosAirQuality(data, name)], True)
def round_state(func):
"""Round state."""
def _decorator(self):
res = func(self)
if isinstance(res, float):
return round(res)
return res
return _decorator
class GiosAirQuality(AirQualityEntity):
"""Define an GIOS sensor."""
def __init__(self, gios, name):
"""Initialize."""
self.gios = gios
self._name = name
self._aqi = None
self._co = None
self._no2 = None
self._o3 = None
self._pm_2_5 = None
self._pm_10 = None
self._so2 = None
self._attrs = {}
@property
def name(self):
"""Return the name."""
return self._name
@property
def icon(self):
"""Return the icon."""
if self._aqi in ICONS_MAP:
return ICONS_MAP[self._aqi]
return "mdi:blur"
@property
def air_quality_index(self):
"""Return the air quality index."""
return self._aqi
@property
@round_state
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self._pm_2_5
@property
@round_state
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
return self._pm_10
@property
@round_state
def ozone(self):
"""Return the O3 (ozone) level."""
return self._o3
@property
@round_state
def carbon_monoxide(self):
"""Return the CO (carbon monoxide) level."""
return self._co
@property
@round_state
def sulphur_dioxide(self):
"""Return the SO2 (sulphur dioxide) level."""
return self._so2
@property
@round_state
def nitrogen_dioxide(self):
"""Return the NO2 (nitrogen dioxide) level."""
return self._no2
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self.gios.station_id
@property
def available(self):
"""Return True if entity is available."""
return self.gios.available
@property
def device_state_attributes(self):
"""Return the state attributes."""
self._attrs[ATTR_STATION] = self.gios.station_name
return self._attrs
async def async_update(self):
"""Get the data from GIOS."""
await self.gios.async_update()
if self.gios.available:
# Different measuring stations have different sets of sensors. We don't know
# what data we will get.
if "AQI" in self.gios.sensors:
self._aqi = self.gios.sensors["AQI"]["value"]
if "CO" in self.gios.sensors:
self._co = self.gios.sensors["CO"]["value"]
self._attrs[f"{ATTR_CO}_index"] = self.gios.sensors["CO"]["index"]
if "NO2" in self.gios.sensors:
self._no2 = self.gios.sensors["NO2"]["value"]
self._attrs[f"{ATTR_NO2}_index"] = self.gios.sensors["NO2"]["index"]
if "O3" in self.gios.sensors:
self._o3 = self.gios.sensors["O3"]["value"]
self._attrs[f"{ATTR_OZONE}_index"] = self.gios.sensors["O3"]["index"]
if "PM2.5" in self.gios.sensors:
self._pm_2_5 = self.gios.sensors["PM2.5"]["value"]
self._attrs[f"{ATTR_PM_2_5}_index"] = self.gios.sensors["PM2.5"][
"index"
]
if "PM10" in self.gios.sensors:
self._pm_10 = self.gios.sensors["PM10"]["value"]
self._attrs[f"{ATTR_PM_10}_index"] = self.gios.sensors["PM10"]["index"]
if "SO2" in self.gios.sensors:
self._so2 = self.gios.sensors["SO2"]["value"]
self._attrs[f"{ATTR_SO2}_index"] = self.gios.sensors["SO2"]["index"]
| 29.100629 | 88 | 0.592392 | from homeassistant.components.air_quality import (
ATTR_CO,
ATTR_NO2,
ATTR_OZONE,
ATTR_PM_2_5,
ATTR_PM_10,
ATTR_SO2,
AirQualityEntity,
)
from homeassistant.const import CONF_NAME
from .const import ATTR_STATION, DATA_CLIENT, DEFAULT_SCAN_INTERVAL, DOMAIN, ICONS_MAP
ATTRIBUTION = "Data provided by GIOŚ"
SCAN_INTERVAL = DEFAULT_SCAN_INTERVAL
async def async_setup_entry(hass, config_entry, async_add_entities):
name = config_entry.data[CONF_NAME]
data = hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id]
async_add_entities([GiosAirQuality(data, name)], True)
def round_state(func):
def _decorator(self):
res = func(self)
if isinstance(res, float):
return round(res)
return res
return _decorator
class GiosAirQuality(AirQualityEntity):
def __init__(self, gios, name):
self.gios = gios
self._name = name
self._aqi = None
self._co = None
self._no2 = None
self._o3 = None
self._pm_2_5 = None
self._pm_10 = None
self._so2 = None
self._attrs = {}
@property
def name(self):
return self._name
@property
def icon(self):
if self._aqi in ICONS_MAP:
return ICONS_MAP[self._aqi]
return "mdi:blur"
@property
def air_quality_index(self):
return self._aqi
@property
@round_state
def particulate_matter_2_5(self):
return self._pm_2_5
@property
@round_state
def particulate_matter_10(self):
return self._pm_10
@property
@round_state
def ozone(self):
return self._o3
@property
@round_state
def carbon_monoxide(self):
return self._co
@property
@round_state
def sulphur_dioxide(self):
return self._so2
@property
@round_state
def nitrogen_dioxide(self):
return self._no2
@property
def attribution(self):
return ATTRIBUTION
@property
def unique_id(self):
return self.gios.station_id
@property
def available(self):
return self.gios.available
@property
def device_state_attributes(self):
self._attrs[ATTR_STATION] = self.gios.station_name
return self._attrs
async def async_update(self):
await self.gios.async_update()
if self.gios.available:
# what data we will get.
if "AQI" in self.gios.sensors:
self._aqi = self.gios.sensors["AQI"]["value"]
if "CO" in self.gios.sensors:
self._co = self.gios.sensors["CO"]["value"]
self._attrs[f"{ATTR_CO}_index"] = self.gios.sensors["CO"]["index"]
if "NO2" in self.gios.sensors:
self._no2 = self.gios.sensors["NO2"]["value"]
self._attrs[f"{ATTR_NO2}_index"] = self.gios.sensors["NO2"]["index"]
if "O3" in self.gios.sensors:
self._o3 = self.gios.sensors["O3"]["value"]
self._attrs[f"{ATTR_OZONE}_index"] = self.gios.sensors["O3"]["index"]
if "PM2.5" in self.gios.sensors:
self._pm_2_5 = self.gios.sensors["PM2.5"]["value"]
self._attrs[f"{ATTR_PM_2_5}_index"] = self.gios.sensors["PM2.5"][
"index"
]
if "PM10" in self.gios.sensors:
self._pm_10 = self.gios.sensors["PM10"]["value"]
self._attrs[f"{ATTR_PM_10}_index"] = self.gios.sensors["PM10"]["index"]
if "SO2" in self.gios.sensors:
self._so2 = self.gios.sensors["SO2"]["value"]
self._attrs[f"{ATTR_SO2}_index"] = self.gios.sensors["SO2"]["index"]
| true | true |
f7285d209d8dec2abc3f07b89c6b4a0108cd21a6 | 2,076 | py | Python | Exercise_7_problem_1.py | student-work-agu-gis2021/lesson7-matplotlib-AbeRyusei | 2adc657c1c1c02014a5a113b25f28756df377619 | [
"BSD-3-Clause"
] | null | null | null | Exercise_7_problem_1.py | student-work-agu-gis2021/lesson7-matplotlib-AbeRyusei | 2adc657c1c1c02014a5a113b25f28756df377619 | [
"BSD-3-Clause"
] | null | null | null | Exercise_7_problem_1.py | student-work-agu-gis2021/lesson7-matplotlib-AbeRyusei | 2adc657c1c1c02014a5a113b25f28756df377619 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# ## Problem 1: Simple scatter plot using random
#
# We can generate random numbers using using a method `random.rand()` from the [NumPy package](https://numpy.org/). This example generates 10 random values:
#
# ```
# import numpy as np
# random_numbers = np.random.rand(10)
#
# ```
#
# ### Part 1
#
# Create an new data frame called `data` and add 1000 random numbers (`float`) into a new column `x` and another 1000 random numbers (`float`) into a new column `y`.
import numpy as np
import pandas as pd
# YOUR CODE HERE 1 to set data
x = np.random.rand(1000)
y = np.random.rand(1000)
data = pd.DataFrame()
data["x"] = x
data["y"] = y
# Check your random values
print(data.head())
# Check that you have the correct number of rows
assert len(data) == 1000, "There should be 1000 rows of data."
# ### Part 2
#
# YOUR CODE HERE 2 to set colors
colors = np.random.rand(1000)
# This test print should print out 10 first numbers in the variable colors
print(colors[0:10])
# Check that the length matches
assert len(colors) == 1000, "There should be 1000 random numbers for colors"
# ### Part 3
#
# #### Part 3.1
#
# Create a scatter plot of points with random colors
#
# #### Part 3.2
#
# #### Part 3.3
#
# Plot a scatter plot
# YOUR CODE HERE 3
import matplotlib.pyplot as plt
plt.scatter(x, y, s = 50, c = colors, cmap = 'rainbow', edgecolor = 'black')
# Add labels and title
# YOUR CODE HERE 4
plt.title("My random candy points")
plt.xlabel("X-label")
plt.ylabel("Y-label")
plt.show()
# Save the plot as a png file:
outputfp = "my_first_plot.png"
# YOUR CODE HERE 5
# This test print statement should print the output filename of your figure
print("Saved my first plot as:", outputfp)
#Check that the file exists (also go and open the file to check that everything is ok!)
import os
assert os.path.exists(outputfp), "Can't find the output image."
# Remember to commit your changes (including the image file) to your GitHub repo!
#
# ### Done!
#
# Now you can move to [problem 2](Exercise-7-problem-2.ipynb).
| 24.139535 | 165 | 0.691233 |
rame()
data["x"] = x
data["y"] = y
print(data.head())
assert len(data) == 1000, "There should be 1000 rows of data."
(1000)
print(colors[0:10])
assert len(colors) == 1000, "There should be 1000 random numbers for colors"
el("X-label")
plt.ylabel("Y-label")
plt.show()
outputfp = "my_first_plot.png"
print("Saved my first plot as:", outputfp)
import os
assert os.path.exists(outputfp), "Can't find the output image."
# Remember to commit your changes (including the image file) to your GitHub repo!
#
# ### Done!
#
# Now you can move to [problem 2](Exercise-7-problem-2.ipynb).
| true | true |
f7285d241ce75e9b15cd66033dc57a73fcf965fe | 3,158 | py | Python | data/p2DJ/New/program/qiskit/class/startQiskit_Class182.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/class/startQiskit_Class182.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p2DJ/New/program/qiskit/class/startQiskit_Class182.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=2
# total number=13
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
prog.y(input_qubit[1]) # number=2
prog.y(input_qubit[1]) # number=4
prog.y(input_qubit[1]) # number=3
prog.cx(input_qubit[1],input_qubit[0]) # number=7
prog.x(input_qubit[0]) # number=8
prog.h(input_qubit[0]) # number=10
prog.cz(input_qubit[1],input_qubit[0]) # number=11
prog.h(input_qubit[0]) # number=12
prog.x(input_qubit[0]) # number=6
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
prog = circuit1
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startQiskit_Class182.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 28.196429 | 80 | 0.618746 |
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Builds an oracle circuit O_f for boolean function f over n input bits.

    For every input bit string where f(rep) == "1", flips the target qubit
    via a multi-controlled Toffoli, using X gates to match the "0" bits.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Surround the MCT with X gates so the control pattern matches
            # this specific bit string (controls fire on |1> only).
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the X gates to restore the control qubits.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Builds a Deutsch-Jozsa style circuit around the oracle for f."""
    input_qubit = QuantumRegister(n, "qc")
    target = QuantumRegister(1, "qt")
    prog = QuantumCircuit(input_qubit, target)
    # Flip the target (can be omitted when using a +/- phase oracle).
    prog.x(target)
    # Hadamard every input qubit to prepare the uniform superposition.
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[1])
    prog.h(target)
    prog.barrier()
    # Apply the oracle O_f across all inputs plus the target.
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [target])
    # Hadamard again (QFT over Z_2^n).
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # Trailing auto-generated gate sequence; order is significant.
    prog.y(input_qubit[1])
    prog.y(input_qubit[1])
    prog.y(input_qubit[1])
    prog.cx(input_qubit[1],input_qubit[0])
    prog.x(input_qubit[0])
    prog.h(input_qubit[0])
    prog.cz(input_qubit[1],input_qubit[0])
    prog.h(input_qubit[0])
    prog.x(input_qubit[0])
    return prog
if __name__ == '__main__':
    n = 2
    # Oracle function: returns the last bit of the input string (balanced).
    f = lambda rep: rep[-1]
    prog = make_circuit(n, f)
    sample_shot =2800
    backend = BasicAer.get_backend('statevector_simulator')
    # Compile the circuit against the FakeVigo 5-qubit backend model.
    circuit1 = transpile(prog,FakeVigo())
    # Two consecutive X gates on qubit 3 cancel out (net identity).
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)
    prog = circuit1
    # Simulate and pull the final statevector.
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Map each basis-state bit string to its measurement probability |amp|^2.
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    # Dump probabilities, circuit depth and the circuit drawing to CSV.
    writefile = open("../data/startQiskit_Class182.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| true | true |
f7285d2b964cb3217a49516ff0bbcdfaab4c6562 | 1,154 | py | Python | src/LA_Apartment_Analysis.py | mann-brinson/LA_Apartments_Scraper | ba87e85d3539a8d91ce0a84d8f4e7616757472f2 | [
"CC0-1.0"
] | 2 | 2020-01-13T21:13:50.000Z | 2021-05-31T14:44:31.000Z | src/LA_Apartment_Analysis.py | mann-brinson/LA_Apartments_Scraper | ba87e85d3539a8d91ce0a84d8f4e7616757472f2 | [
"CC0-1.0"
] | 1 | 2021-06-02T00:52:22.000Z | 2021-06-02T00:52:22.000Z | src/LA_Apartment_Analysis.py | mann-brinson/LA_Apartments_Scraper | ba87e85d3539a8d91ce0a84d8f4e7616757472f2 | [
"CC0-1.0"
] | null | null | null | import neighborhoods_api
import apartments_scrape
import queries_from_terminal
import sys
print(f"We're in file {__file__}")
#Require the user to input this driver and source option
#Will prompt the user to enter a source argument (remote or local)
if len(sys.argv) < 2:
print('To few arguments, please put in LA_Apartment_Analysis.py and data source argument (remote or local). EX: "LA_Apartment_Analysis.py remote"')
sys.exit(0)
if sys.argv[1] == 'remote':
print("Calling neighborhoods_api.py. This should create the neighborhood table. Please wait...")
neighborhoods_api.main()
print("Calling apartments_scrape.py. This should create the apartment table. Please wait...")
apartments_scrape.main()
print("Calling queries_from_terminal.py. This should return some queries about the database. Please wait...")
queries_from_terminal.main()
elif sys.argv[1] == 'local':
print("Calling queries_from_terminal.py. This should return some queries about the database. Please wait...")
queries_from_terminal.main()
else:
print("Please enter 'remote' or 'local' as your second argument. EX: 'LA_Apartment_Analysis.py remote' ")
sys.exit(0)
| 39.793103 | 151 | 0.768631 | import neighborhoods_api
import apartments_scrape
import queries_from_terminal
import sys
print(f"We're in file {__file__}")
#Require the user to input this driver and source option
#Will prompt the user to enter a source argument (remote or local)
if len(sys.argv) < 2:
print('To few arguments, please put in LA_Apartment_Analysis.py and data source argument (remote or local). EX: "LA_Apartment_Analysis.py remote"')
sys.exit(0)
if sys.argv[1] == 'remote':
print("Calling neighborhoods_api.py. This should create the neighborhood table. Please wait...")
neighborhoods_api.main()
print("Calling apartments_scrape.py. This should create the apartment table. Please wait...")
apartments_scrape.main()
print("Calling queries_from_terminal.py. This should return some queries about the database. Please wait...")
queries_from_terminal.main()
elif sys.argv[1] == 'local':
print("Calling queries_from_terminal.py. This should return some queries about the database. Please wait...")
queries_from_terminal.main()
else:
print("Please enter 'remote' or 'local' as your second argument. EX: 'LA_Apartment_Analysis.py remote' ")
sys.exit(0)
| true | true |
f7285eae36778f3c3540ee760f0731e31def83ac | 52,140 | py | Python | appengine/components/components/auth/api_test.py | amymariaparker2401/luci-py | c5902547adc12390df6b09c825a38370f1034e8b | [
"Apache-2.0"
] | null | null | null | appengine/components/components/auth/api_test.py | amymariaparker2401/luci-py | c5902547adc12390df6b09c825a38370f1034e8b | [
"Apache-2.0"
] | 1 | 2022-03-02T09:56:27.000Z | 2022-03-02T09:56:27.000Z | appengine/components/components/auth/api_test.py | Lees78/luci-py | 7b854c55f63e648005ae8aa38e2e41cd8f99feda | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env vpython
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
# Disable 'Access to a protected member', Unused argument', 'Unused variable'.
# pylint: disable=W0212,W0612,W0613
# pylint: disable=redefined-outer-name
import datetime
import sys
import threading
import unittest
from six.moves import queue
import mock
from test_support import test_env
test_env.setup_test_env()
from google.appengine.ext import ndb
from components.auth import api
from components.auth import config
from components.auth import ipaddr
from components.auth import model
from components.auth import realms
from components.auth import replication
from components.auth.proto import replication_pb2
from components.auth.proto import security_config_pb2
from components import utils
from test_support import test_case
def new_auth_db(
    replication_state=None,
    global_config=None,
    groups=None,
    ip_whitelist_assignments=None,
    ip_whitelists=None,
    internal_service_regexp=None,
    additional_client_ids=None
):
  """Assembles an api.AuthDB from (mostly optional) auth model entities.

  Every argument defaults to an empty entity/list, so tests only supply
  what they care about. |internal_service_regexp| is serialized into the
  global config's security_config blob.
  """
  cfg = global_config or model.AuthGlobalConfig()
  cfg.security_config = security_config_blob(internal_service_regexp)
  state = replication_state or model.AuthReplicationState()
  assignments = ip_whitelist_assignments or model.AuthIPWhitelistAssignments()
  return api.AuthDB.from_entities(
      replication_state=state,
      global_config=cfg,
      groups=groups or [],
      ip_whitelist_assignments=assignments,
      ip_whitelists=ip_whitelists or [],
      additional_client_ids=additional_client_ids or [])
def security_config_blob(regexps=None):
  """Serializes a SecurityConfig proto with the given internal regexps.

  Any falsy |regexps| value is replaced with the default example regexp,
  matching the original `or`-based default behavior.
  """
  pb = security_config_pb2.SecurityConfig(
      internal_service_regexp=regexps or ['(.*-dot-)?internal\\.example\\.com'])
  return pb.SerializeToString()
class AuthDBTest(test_case.TestCase):
  """Tests for AuthDB class."""
  def setUp(self):
    # Silence auth library logging by default; individual tests re-mock
    # these callbacks to capture the emitted messages.
    super(AuthDBTest, self).setUp()
    self.mock(api.logging, 'warning', lambda *_args: None)
    self.mock(api.logging, 'error', lambda *_args: None)
  def test_get_group(self):
    """get_group returns a copy of the stored group with sorted members."""
    g = model.AuthGroup(
        key=model.group_key('group'),
        members=[
          model.Identity.from_bytes('user:b@example.com'),
          model.Identity.from_bytes('user:a@example.com'),
        ],
        globs=[model.IdentityGlob.from_bytes('user:*')],
        nested=['blah'],
        created_by=model.Identity.from_bytes('user:x@example.com'),
        created_ts=datetime.datetime(2014, 1, 2, 3, 4, 5),
        modified_by=model.Identity.from_bytes('user:y@example.com'),
        modified_ts=datetime.datetime(2015, 1, 2, 3, 4, 5))
    db = new_auth_db(groups=[g])
    # Unknown group.
    self.assertIsNone(db.get_group('blah'))
    # Known group.
    from_cache = db.get_group('group')
    self.assertEqual(from_cache.key, g.key)
    # Members list is sorted.
    self.assertEqual(from_cache.members, [
      model.Identity.from_bytes('user:a@example.com'),
      model.Identity.from_bytes('user:b@example.com'),
    ])
    # Fields that are known to be different.
    exclude = ['members', 'auth_db_rev', 'auth_db_prev_rev']
    self.assertEqual(
        from_cache.to_dict(exclude=exclude),
        g.to_dict(exclude=exclude))
  def test_is_group_member(self):
    """is_group_member handles '*', globs, listings and nested groups."""
    # Test identity.
    joe = model.Identity(model.IDENTITY_USER, 'joe@example.com')
    # Group that includes joe via glob.
    with_glob = model.AuthGroup(id='WithGlob')
    with_glob.globs.append(
        model.IdentityGlob(model.IDENTITY_USER, '*@example.com'))
    # Group that includes joe via explicit listing.
    with_listing = model.AuthGroup(id='WithListing')
    with_listing.members.append(joe)
    # Group that includes joe via nested group.
    with_nesting = model.AuthGroup(id='WithNesting')
    with_nesting.nested.append('WithListing')
    # Creates AuthDB with given list of groups and then runs the check.
    is_member = (lambda groups, ident, group:
        new_auth_db(groups=groups).is_group_member(group, ident))
    # Wildcard group includes everyone (even anonymous).
    self.assertTrue(is_member([], joe, '*'))
    self.assertTrue(is_member([], model.Anonymous, '*'))
    # An unknown group includes nobody.
    self.assertFalse(is_member([], joe, 'Missing'))
    self.assertFalse(is_member([], model.Anonymous, 'Missing'))
    # Globs are respected.
    self.assertTrue(is_member([with_glob], joe, 'WithGlob'))
    self.assertFalse(is_member([with_glob], model.Anonymous, 'WithGlob'))
    # Members lists are respected.
    self.assertTrue(is_member([with_listing], joe, 'WithListing'))
    self.assertFalse(is_member([with_listing], model.Anonymous, 'WithListing'))
    # Nested groups are respected.
    self.assertTrue(is_member([with_nesting, with_listing], joe, 'WithNesting'))
    self.assertFalse(
        is_member([with_nesting, with_listing], model.Anonymous, 'WithNesting'))
  def test_list_group(self):
    """list_group dedups members/globs and optionally expands nesting."""
    def list_group(groups, group, recursive):
      # Sort the listing so assertions below don't depend on internal order.
      l = new_auth_db(groups=groups).list_group(group, recursive)
      return api.GroupListing(
          sorted(l.members), sorted(l.globs), sorted(l.nested))
    grp_1 = model.AuthGroup(id='1')
    grp_1.members.extend([
      model.Identity(model.IDENTITY_USER, 'a@example.com'),
      model.Identity(model.IDENTITY_USER, 'b@example.com'),
    ])
    grp_1.globs.extend([
      model.IdentityGlob(model.IDENTITY_USER, '*@a.example.com'),
      model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'),
    ])
    grp_2 = model.AuthGroup(id='2')
    grp_2.nested.append('1')
    grp_2.members.extend([
      # Specify 'b' again, even though it's in a nested group.
      model.Identity(model.IDENTITY_USER, 'b@example.com'),
      model.Identity(model.IDENTITY_USER, 'c@example.com'),
    ])
    grp_2.globs.extend([
      # Specify '*@b.example.com' again, even though it's in a nested group.
      model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'),
      model.IdentityGlob(model.IDENTITY_USER, '*@c.example.com'),
    ])
    # Unknown group.
    empty = api.GroupListing([], [], [])
    self.assertEqual(empty, list_group([grp_1, grp_2], 'blah', False))
    self.assertEqual(empty, list_group([grp_1, grp_2], 'blah', True))
    # Non recursive.
    expected = api.GroupListing(
        members=[
          model.Identity(model.IDENTITY_USER, 'b@example.com'),
          model.Identity(model.IDENTITY_USER, 'c@example.com'),
        ],
        globs=[
          model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'),
          model.IdentityGlob(model.IDENTITY_USER, '*@c.example.com'),
        ],
        nested=['1'])
    self.assertEqual(expected, list_group([grp_1, grp_2], '2', False))
    # Recursive.
    expected = api.GroupListing(
        members=[
          model.Identity(model.IDENTITY_USER, 'a@example.com'),
          model.Identity(model.IDENTITY_USER, 'b@example.com'),
          model.Identity(model.IDENTITY_USER, 'c@example.com'),
        ],
        globs=[
          model.IdentityGlob(model.IDENTITY_USER, '*@a.example.com'),
          model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'),
          model.IdentityGlob(model.IDENTITY_USER, '*@c.example.com'),
        ],
        nested=['1'])
    self.assertEqual(expected, list_group([grp_1, grp_2], '2', True))
  def test_nested_groups_cycle(self):
    """A cycle in the group graph is detected and logged, not looped on."""
    # Groups that nest each other.
    group1 = model.AuthGroup(id='Group1')
    group1.nested.append('Group2')
    group2 = model.AuthGroup(id='Group2')
    group2.nested.append('Group1')
    # Collect warnings.
    warnings = []
    self.mock(api.logging, 'warning', lambda msg, *_args: warnings.append(msg))
    # This should not hang, but produce error message.
    auth_db = new_auth_db(groups=[group1, group2])
    self.assertFalse(
        auth_db.is_group_member('Group1', model.Anonymous))
    self.assertEqual(1, len(warnings))
    self.assertTrue('Cycle in a group graph' in warnings[0])
  def test_not_real_nested_group_cycle_aka_issue_251(self):
    """Diamond-shaped nesting must not be misreported as a cycle."""
    # See https://github.com/luci/luci-py/issues/251.
    #
    # B -> A, C -> [B, A]. When traversing C, A is seen twice, and this is fine.
    group_A = model.AuthGroup(id='A')
    group_B = model.AuthGroup(id='B')
    group_C = model.AuthGroup(id='C')
    group_B.nested = ['A']
    group_C.nested = ['A', 'B']
    db = new_auth_db(groups=[group_A, group_B, group_C])
    # 'is_group_member' must not report 'Cycle in a group graph' warning.
    warnings = []
    self.mock(api.logging, 'warning', lambda msg, *_args: warnings.append(msg))
    self.assertFalse(db.is_group_member('C', model.Anonymous))
    self.assertFalse(warnings)
  def test_is_allowed_oauth_client_id(self):
    """Config-listed, callback-supplied and API Explorer IDs are allowed."""
    global_config = model.AuthGlobalConfig(
        oauth_client_id='1',
        oauth_additional_client_ids=['2', '3'])
    auth_db = new_auth_db(
        global_config=global_config,
        additional_client_ids=['local'])
    self.assertFalse(auth_db.is_allowed_oauth_client_id(None))
    self.assertTrue(auth_db.is_allowed_oauth_client_id('1'))
    self.assertTrue(auth_db.is_allowed_oauth_client_id('2'))
    self.assertTrue(auth_db.is_allowed_oauth_client_id('3'))
    self.assertTrue(auth_db.is_allowed_oauth_client_id('local'))
    self.assertTrue(
        auth_db.is_allowed_oauth_client_id(api.API_EXPLORER_CLIENT_ID))
    self.assertFalse(auth_db.is_allowed_oauth_client_id('4'))
  def test_fetch_auth_db_lazy_bootstrap(self):
    """fetch_auth_db creates the root entity on first use."""
    # Don't exist before the call.
    self.assertFalse(model.root_key().get())
    # Run bootstrap.
    api._lazy_bootstrap_ran = False
    api.fetch_auth_db()
    # Exist now.
    self.assertTrue(model.root_key().get())
    # Simulate datastore wipe which can happen in tests, verify fetch_auth_db
    # still works. It hits slightly different code path since wiping datastore
    # doesn't reset _lazy_bootstrap_ran global var.
    model.root_key().delete()
    api.fetch_auth_db()
  def run_auth_db_fetch_test(self, setup_cb):
    """Populates auth entities, fetches AuthDB and verifies its contents.

    |setup_cb|, if not None, is invoked after the entities are stored (used
    to convert them into a replica snapshot). Returns the fetched AuthDB.
    """
    now = utils.utcnow()
    ident = model.Identity.from_bytes('user:a@example.com')
    # Client IDs callback. Disable config.ensure_configured() since it overrides
    # _additional_client_ids_cb after we mock it.
    self.mock(config, 'ensure_configured', lambda: None)
    self.mock(api, '_additional_client_ids_cb', lambda: ['', 'cb_client_id'])
    self.mock(api, 'get_web_client_id', lambda: 'web_client_id')
    # Create AuthGlobalConfig.
    global_config = model.AuthGlobalConfig(key=model.root_key())
    global_config.oauth_client_id = '1'
    global_config.oauth_client_secret = 'secret'
    global_config.oauth_additional_client_ids = ['2', '3']
    global_config.security_config = security_config_blob()
    global_config.token_server_url = 'token_server_url'
    global_config.put()
    # What we expect to see in the AuthDB.
    expected_groups = {}
    def add_group(name, members, globs, nested, owners):
      # Record the expectation and store the corresponding entity.
      expected_groups[name] = (
          frozenset(members),
          tuple(model.IdentityGlob.from_bytes(g) for g in globs),
          tuple(nested),
          owners,
      )
      model.AuthGroup(
          key=model.group_key(name),
          members=[model.Identity.from_bytes(m) for m in members],
          globs=[model.IdentityGlob.from_bytes(g) for g in globs],
          nested=nested,
          owners=owners,
          created_ts=now,
          created_by=ident,
          modified_ts=now,
          modified_by=ident,
      ).put()
    # Create a bunch of groups.
    add_group(
        name='Group A',
        members=['user:a@example.com', 'user:b@example.com'],
        globs=['user:*@example.com'],
        nested=['Group B', 'Group C'],
        owners='Group A')
    add_group(
        name='Group B',
        members=['user:c@example.com'],
        globs=['user:*@example.com'],
        nested=[],
        owners='Group A')
    add_group(
        name='Group C',
        members=[],
        globs=[],
        nested=[],
        owners='Group C')
    # And a bunch of IP whitelists.
    model.AuthIPWhitelistAssignments(
        key=model.ip_whitelist_assignments_key(),
        assignments=[
          model.AuthIPWhitelistAssignments.Assignment(
              identity=model.Anonymous,
              ip_whitelist='some ip whitelist',
              created_ts=now,
              created_by=ident,
              comment='comment',
          ),
        ],
    ).put()
    model.AuthIPWhitelist(
        key=model.ip_whitelist_key('some ip whitelist'),
        subnets=['127.0.0.1/32'],
        description='description',
        created_ts=now,
        created_by=ident,
        modified_ts=now,
        modified_by=ident,
    ).put()
    model.AuthIPWhitelist(
        key=model.ip_whitelist_key('bots'),
        subnets=['127.0.0.1/32'],
        description='description',
        created_ts=now,
        created_by=ident,
        modified_ts=now,
        modified_by=ident,
    ).put()
    if setup_cb:
      setup_cb()
    # Verify all the stuff above ends up in the auth_db.
    auth_db = api.fetch_auth_db()
    # global_config and additional_client_ids_cb
    self.assertEqual('token_server_url', auth_db.token_server_url)
    self.assertEqual(('1', 'secret', ['2', '3']), auth_db.get_oauth_config())
    self.assertTrue(auth_db.is_allowed_oauth_client_id('1'))
    self.assertTrue(auth_db.is_allowed_oauth_client_id('cb_client_id'))
    self.assertTrue(auth_db.is_allowed_oauth_client_id('web_client_id'))
    self.assertFalse(auth_db.is_allowed_oauth_client_id(''))
    # Groups.
    self.assertEqual(
        expected_groups,
        {
          name: (g.members, g.globs, g.nested, g.owners)
          for name, g in auth_db._groups.items()
        })
    # IP whitelists and whitelist assignments.
    self.assertEqual(
        {model.Anonymous: 'some ip whitelist'},
        auth_db._ip_whitelist_assignments)
    self.assertEqual(
        {'bots': ['127.0.0.1/32'], 'some ip whitelist': ['127.0.0.1/32']},
        auth_db._ip_whitelists)
    return auth_db
  def test_fetch_auth_db_from_entities(self):
    """In Standalone mode the AuthDB is built from datastore entities."""
    auth_db = self.run_auth_db_fetch_test(None)
    self.assertEqual('from_entities', auth_db._from_what)
  def test_fetch_auth_db_from_snapshot(self):
    """In Replica mode the AuthDB is deserialized from a sharded snapshot."""
    PRIMARY_ID = 'primary_id'
    PRIMARY_URL = 'https://primary_url'
    AUTH_DB_REV = 12345
    def setup_snapshot():
      # Create AuthDB snapshot entities from existing "detailed" entities in
      # the datastore.
      _, snap = replication.new_auth_db_snapshot()
      # Switch into Replica mode, store the snapshot.
      model.AuthReplicationState(
          key=model.replication_state_key(),
          primary_id=PRIMARY_ID,
          primary_url=PRIMARY_URL,
          auth_db_rev=AUTH_DB_REV,
          shard_ids=replication.store_sharded_auth_db(
              auth_db=replication.auth_db_snapshot_to_proto(snap),
              primary_url=PRIMARY_URL,
              auth_db_rev=AUTH_DB_REV,
              shard_size=100,
          ),
      ).put()
    auth_db = self.run_auth_db_fetch_test(setup_snapshot)
    self.assertEqual('from_proto', auth_db._from_what)
    self.assertEqual(PRIMARY_ID, auth_db.primary_id)
    self.assertEqual(PRIMARY_URL, auth_db.primary_url)
    self.assertEqual(AUTH_DB_REV, auth_db.auth_db_rev)
  def test_get_secret_bootstrap(self):
    """get_secret bootstraps a missing secret via AuthSecret.bootstrap."""
    # Mock AuthSecret.bootstrap to capture calls to it.
    original = api.model.AuthSecret.bootstrap
    calls = []
    @classmethod
    def mocked_bootstrap(cls, name):
      calls.append(name)
      result = original(name)
      result.values = ['123']
      return result
    self.mock(api.model.AuthSecret, 'bootstrap', mocked_bootstrap)
    auth_db = new_auth_db()
    got = auth_db.get_secret(api.SecretKey('some_secret'))
    self.assertEqual(['123'], got)
    self.assertEqual(['some_secret'], calls)
  def test_is_in_ip_whitelist(self):
    """is_in_ip_whitelist matches both single IPs and CIDR subnets."""
    auth_db = new_auth_db(ip_whitelists=[
      model.AuthIPWhitelist(
          key=model.ip_whitelist_key('l'),
          subnets=['127.0.0.1', '192.168.0.0/24']),
    ])
    test = lambda ip: auth_db.is_in_ip_whitelist('l', ipaddr.ip_from_string(ip))
    self.assertTrue(test('127.0.0.1'))
    self.assertTrue(test('192.168.0.0'))
    self.assertTrue(test('192.168.0.9'))
    self.assertTrue(test('192.168.0.255'))
    self.assertFalse(test('192.168.1.0'))
    self.assertFalse(test('192.1.0.0'))
  @staticmethod
  def make_auth_db_with_ip_whitelist():
    """AuthDB with a@example.com assigned IP whitelist '127.0.0.1/32'."""
    return new_auth_db(
        ip_whitelists=[
          model.AuthIPWhitelist(
              key=model.ip_whitelist_key('some ip whitelist'),
              subnets=['127.0.0.1/32'],
          ),
          model.AuthIPWhitelist(
              key=model.ip_whitelist_key('bots'),
              subnets=['192.168.1.1/32', '::1/32'],
          ),
        ],
        ip_whitelist_assignments=model.AuthIPWhitelistAssignments(
            assignments=[
              model.AuthIPWhitelistAssignments.Assignment(
                  identity=model.Identity(model.IDENTITY_USER, 'a@example.com'),
                  ip_whitelist='some ip whitelist',)
            ],
        ),
    )
  def test_verify_ip_whitelisted_ok(self):
    # Should not raise: IP is whitelisted.
    ident = model.Identity(model.IDENTITY_USER, 'a@example.com')
    self.make_auth_db_with_ip_whitelist().verify_ip_whitelisted(
        ident, ipaddr.ip_from_string('127.0.0.1'))
  def test_verify_ip_whitelisted_not_whitelisted(self):
    # An assigned identity calling from a non-whitelisted IP is rejected.
    with self.assertRaises(api.AuthorizationError):
      self.make_auth_db_with_ip_whitelist().verify_ip_whitelisted(
          model.Identity(model.IDENTITY_USER, 'a@example.com'),
          ipaddr.ip_from_string('192.168.0.100'))
  def test_verify_ip_whitelisted_not_assigned(self):
    # Should not raise: whitelist is not required for another_user@example.com.
    ident = model.Identity(model.IDENTITY_USER, 'another_user@example.com')
    self.make_auth_db_with_ip_whitelist().verify_ip_whitelisted(
        ident, ipaddr.ip_from_string('192.168.0.100'))
  def test_verify_ip_whitelisted_missing_whitelist(self):
    """An assignment referencing an unknown whitelist rejects the caller."""
    auth_db = new_auth_db(
        ip_whitelist_assignments=model.AuthIPWhitelistAssignments(
            assignments=[
              model.AuthIPWhitelistAssignments.Assignment(
                  identity=model.Identity(model.IDENTITY_USER, 'a@example.com'),
                  ip_whitelist='missing ip whitelist',)
            ],
        ),
    )
    with self.assertRaises(api.AuthorizationError):
      auth_db.verify_ip_whitelisted(
          model.Identity(model.IDENTITY_USER, 'a@example.com'),
          ipaddr.ip_from_string('127.0.0.1'))
  def test_is_internal_domain(self):
    """is_internal_domain applies security_config regexps to full domains."""
    auth_db = new_auth_db(internal_service_regexp=[
      '(.*-dot-)?a-int\\.example\\.com',
      '(.*-dot-)?b-int\\.example\\.com',
    ])
    self.assertTrue(auth_db.is_internal_domain('a-int.example.com'))
    self.assertTrue(auth_db.is_internal_domain('b-int.example.com'))
    self.assertTrue(auth_db.is_internal_domain('z-dot-a-int.example.com'))
    self.assertTrue(auth_db.is_internal_domain('z-dot-b-int.example.com'))
    self.assertFalse(auth_db.is_internal_domain('int.example.com'))
    self.assertFalse(auth_db.is_internal_domain('a-int.example'))
    self.assertFalse(auth_db.is_internal_domain('dot-a-int.example.com'))
def mock_replication_state(auth_db_rev):
  """Returns an AuthReplicationState entity pinned at the given revision."""
  state = model.AuthReplicationState(key=model.replication_state_key())
  state.primary_id = 'primary-id'
  state.auth_db_rev = auth_db_rev
  return state
class TestAuthDBCache(test_case.TestCase):
  """Tests for process-global and request-local AuthDB cache."""
  def setUp(self):
    # Start every test with empty process-global and request-local caches.
    super(TestAuthDBCache, self).setUp()
    api.reset_local_state()
  def set_time(self, ts):
    """Mocks time.time() to return |ts|."""
    self.mock(api.time, 'time', lambda: ts)
  def set_fetched_auth_db(self, auth_db):
    """Mocks fetch_auth_db to return |auth_db|."""
    def mock_fetch_auth_db(known_auth_db=None):
      # Mimic the real fetch: if the caller already has this revision,
      # return the caller's instance instead of the "fetched" one.
      if (known_auth_db is not None and
          auth_db.auth_db_rev == known_auth_db.auth_db_rev):
        return known_auth_db
      return auth_db
    self.mock(api, 'fetch_auth_db', mock_fetch_auth_db)
  def test_get_request_cache_different_threads(self):
    """Ensure get_request_cache() respects multiple threads."""
    # Runs in its own thread.
    def thread_proc():
      request_cache = api.reinitialize_request_cache()
      self.assertTrue(request_cache)
      # Returns same object in a context of a same request thread.
      self.assertTrue(api.get_request_cache() is request_cache)
      return request_cache
    # Launch two threads running 'thread_proc', wait for them to stop, collect
    # whatever they return.
    results_queue = queue.Queue()
    threads = [
      threading.Thread(target=lambda: results_queue.put(thread_proc()))
      for _ in range(2)
    ]
    for t in threads:
      t.start()
    results = [results_queue.get(timeout=1) for _ in range(len(threads))]
    # Different threads use different RequestCache objects.
    self.assertTrue(results[0] is not results[1])
  def test_get_request_cache_different_requests(self):
    """Ensure get_request_cache() returns new object for a new request."""
    # Grab request cache for 'current' request.
    request_cache = api.reinitialize_request_cache()
    # Track calls to 'close'.
    close_calls = []
    self.mock(request_cache, 'close', lambda: close_calls.append(1))
    # Should return a new instance of request cache now.
    self.assertTrue(api.reinitialize_request_cache() is not request_cache)
    # Old one should have been closed.
    self.assertEqual(1, len(close_calls))
  def test_get_process_auth_db_expiration(self):
    """Ensure get_process_auth_db() respects expiration."""
    # Prepare several instances of AuthDB to be used in mocks.
    auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
    auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1))
    # Fetch initial copy of AuthDB.
    self.set_time(0)
    self.set_fetched_auth_db(auth_db_v0)
    self.assertEqual(auth_db_v0, api.get_process_auth_db())
    # It doesn't expire for some time.
    self.set_time(api.get_process_cache_expiration_sec() - 1)
    self.set_fetched_auth_db(auth_db_v1)
    self.assertEqual(auth_db_v0, api.get_process_auth_db())
    # But eventually it does.
    self.set_time(api.get_process_cache_expiration_sec() + 1)
    self.set_fetched_auth_db(auth_db_v1)
    self.assertEqual(auth_db_v1, api.get_process_auth_db())
  def test_get_process_auth_db_known_version(self):
    """Ensure get_process_auth_db() respects entity group version."""
    # Prepare several instances of AuthDB to be used in mocks.
    auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
    auth_db_v0_again = new_auth_db(replication_state=mock_replication_state(0))
    # Fetch initial copy of AuthDB.
    self.set_time(0)
    self.set_fetched_auth_db(auth_db_v0)
    self.assertEqual(auth_db_v0, api.get_process_auth_db())
    # Make cache expire, but setup fetch_auth_db to return a new instance of
    # AuthDB, but with same entity group version. Old known instance of AuthDB
    # should be reused.
    self.set_time(api.get_process_cache_expiration_sec() + 1)
    self.set_fetched_auth_db(auth_db_v0_again)
    self.assertTrue(api.get_process_auth_db() is auth_db_v0)
  def test_get_process_auth_db_multithreading(self):
    """Ensure get_process_auth_db() plays nice with multiple threads."""
    def run_in_thread(func):
      """Runs |func| in a parallel thread, returns future (as Queue)."""
      result = queue.Queue()
      thread = threading.Thread(target=lambda: result.put(func()))
      thread.start()
      return result
    # Prepare several instances of AuthDB to be used in mocks.
    auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
    auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1))
    # Run initial fetch, should cache |auth_db_v0| in process cache.
    self.set_time(0)
    self.set_fetched_auth_db(auth_db_v0)
    self.assertEqual(auth_db_v0, api.get_process_auth_db())
    # Make process cache expire.
    self.set_time(api.get_process_cache_expiration_sec() + 1)
    # Start fetching AuthDB from another thread, at some point it will call
    # 'fetch_auth_db', and we pause the thread then and resume main thread.
    fetching_now = threading.Event()
    auth_db_queue = queue.Queue()
    def mock_fetch_auth_db(**_kwargs):
      fetching_now.set()
      return auth_db_queue.get()
    self.mock(api, 'fetch_auth_db', mock_fetch_auth_db)
    future = run_in_thread(api.get_process_auth_db)
    # Wait for internal thread to call |fetch_auth_db|.
    fetching_now.wait()
    # Ok, now main thread is unblocked, while internal thread is blocking on a
    # artificially slow 'fetch_auth_db' call. Main thread can now try to get
    # AuthDB via get_process_auth_db(). It should get older stale copy right
    # away.
    self.assertEqual(auth_db_v0, api.get_process_auth_db())
    # Finish background 'fetch_auth_db' call by returning 'auth_db_v1'.
    # That's what internal thread should get as result of 'get_process_auth_db'.
    auth_db_queue.put(auth_db_v1)
    self.assertEqual(auth_db_v1, future.get())
    # Now main thread should get it as well.
    self.assertEqual(auth_db_v1, api.get_process_auth_db())
  def test_get_process_auth_db_exceptions(self):
    """Ensure get_process_auth_db() handles DB exceptions well."""
    # Prepare several instances of AuthDB to be used in mocks.
    auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
    auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1))
    # Fetch initial copy of AuthDB.
    self.set_time(0)
    self.set_fetched_auth_db(auth_db_v0)
    self.assertEqual(auth_db_v0, api.get_process_auth_db())
    # Make process cache expire.
    self.set_time(api.get_process_cache_expiration_sec() + 1)
    # Emulate an exception in fetch_auth_db.
    def mock_fetch_auth_db(*_kwargs):
      raise Exception('Boom!')
    self.mock(api, 'fetch_auth_db', mock_fetch_auth_db)
    # Capture calls to logging.exception.
    logger_calls = []
    self.mock(api.logging, 'exception', lambda *_args: logger_calls.append(1))
    # Should return older copy of auth_db_v0 and log the exception.
    self.assertEqual(auth_db_v0, api.get_process_auth_db())
    self.assertEqual(1, len(logger_calls))
    # Make fetch_auth_db to work again. Verify get_process_auth_db() works too.
    self.set_fetched_auth_db(auth_db_v1)
    self.assertEqual(auth_db_v1, api.get_process_auth_db())
  def test_get_latest_auth_db(self):
    """Ensure get_latest_auth_db "rushes" cached AuthDB update."""
    auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
    auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1))
    # Fetch initial copy of AuthDB.
    self.set_time(0)
    self.set_fetched_auth_db(auth_db_v0)
    self.assertEqual(auth_db_v0, api.get_process_auth_db())
    # Rig up fetch_auth_db to return a newer version.
    self.set_fetched_auth_db(auth_db_v1)
    # 'get_process_auth_db' still returns the cached one.
    self.assertEqual(auth_db_v0, api.get_process_auth_db())
    # But 'get_latest_auth_db' returns a new one and updates the cached copy.
    self.assertEqual(auth_db_v1, api.get_latest_auth_db())
    self.assertEqual(auth_db_v1, api.get_process_auth_db())
  def test_get_request_auth_db(self):
    """Ensure get_request_auth_db() caches AuthDB in request cache."""
    api.reinitialize_request_cache()
    # 'get_request_auth_db()' returns whatever get_process_auth_db() returns
    # when called for a first time.
    self.mock(api, 'get_process_auth_db', lambda: 'fake')
    self.assertEqual('fake', api.get_request_auth_db())
    # But then it caches it locally and reuses local copy, instead of calling
    # 'get_process_auth_db()' all the time.
    self.mock(api, 'get_process_auth_db', lambda: 'another-fake')
    self.assertEqual('fake', api.get_request_auth_db())
  def test_warmup(self):
    """Ensure api.warmup() fetches AuthDB into process-global cache."""
    # The process-global cache starts empty and is populated by warmup().
    self.assertFalse(api._auth_db)
    api.warmup()
    self.assertTrue(api._auth_db)
class ApiTest(test_case.TestCase):
  """Test for publicly exported API."""

  def setUp(self):
    super(ApiTest, self).setUp()
    # Each test starts with fresh request-local and process-local auth state.
    api.reset_local_state()

  def test_get_current_identity_unitialized(self):
    """If request cache is not initialized, returns Anonymous."""
    self.assertEqual(api.get_current_identity(), model.Anonymous)

  def test_get_current_identity(self):
    """Ensure get_current_identity returns whatever was put in request cache."""
    ident = model.Identity.from_bytes('user:abc@example.com')
    api.get_request_cache().current_identity = ident
    self.assertEqual(ident, api.get_current_identity())

  def test_require_decorator_ok(self):
    """@require calls the callback and then decorated function."""
    callback_calls = []
    def require_callback():
      callback_calls.append(1)
      return True
    @api.require(require_callback)
    def allowed(*args, **kwargs):
      return (args, kwargs)
    self.assertEqual(((1, 2), {'a': 3}), allowed(1, 2, a=3))
    # The permission callback ran exactly once.
    self.assertEqual(1, len(callback_calls))

  def test_require_decorator_fail(self):
    """@require raises exception and doesn't call decorated function."""
    forbidden_calls = []
    @api.require(lambda: False)
    def forbidden():
      forbidden_calls.append(1)
    with self.assertRaises(api.AuthorizationError):
      forbidden()
    self.assertFalse(forbidden_calls)

  def test_require_decorator_error_msg(self):
    """The custom error message passed to @require ends up in the exception."""
    @api.require(lambda: False, 'Forbidden!')
    def forbidden():
      pass
    with self.assertRaisesRegexp(api.AuthorizationError, 'Forbidden!'):
      forbidden()

  def test_require_decorator_nesting_ok(self):
    """Permission checks are called in order."""
    calls = []
    def check(name):
      calls.append(name)
      return True
    @api.require(lambda: check('A'))
    @api.require(lambda: check('B'))
    def allowed(arg):
      return arg
    self.assertEqual('value', allowed('value'))
    # Outermost decorator's check runs first.
    self.assertEqual(['A', 'B'], calls)

  def test_require_decorator_nesting_first_deny(self):
    """First deny raises AuthorizationError."""
    calls = []
    def check(name, result):
      calls.append(name)
      return result
    forbidden_calls = []
    @api.require(lambda: check('A', False))
    @api.require(lambda: check('B', True))
    def forbidden(arg):
      forbidden_calls.append(1)
    with self.assertRaises(api.AuthorizationError):
      forbidden('value')
    self.assertFalse(forbidden_calls)
    # Check 'B' was never reached: 'A' denied first.
    self.assertEqual(['A'], calls)

  def test_require_decorator_nesting_non_first_deny(self):
    """Non-first deny also raises AuthorizationError."""
    calls = []
    def check(name, result):
      calls.append(name)
      return result
    forbidden_calls = []
    @api.require(lambda: check('A', True))
    @api.require(lambda: check('B', False))
    def forbidden(arg):
      forbidden_calls.append(1)
    with self.assertRaises(api.AuthorizationError):
      forbidden('value')
    self.assertFalse(forbidden_calls)
    # Both checks ran; the second one denied.
    self.assertEqual(['A', 'B'], calls)

  def test_require_decorator_on_method(self):
    """@require works on instance methods, preserving 'self' binding."""
    calls = []
    def checker():
      calls.append(1)
      return True
    class Class(object):
      @api.require(checker)
      def method(self, *args, **kwargs):
        return (self, args, kwargs)
    obj = Class()
    self.assertEqual((obj, ('value',), {'a': 2}), obj.method('value', a=2))
    self.assertEqual(1, len(calls))

  def test_require_decorator_on_static_method(self):
    """@require composes with @staticmethod (applied below it)."""
    calls = []
    def checker():
      calls.append(1)
      return True
    class Class(object):
      @staticmethod
      @api.require(checker)
      def static_method(*args, **kwargs):
        return (args, kwargs)
    self.assertEqual((('value',), {'a': 2}), Class.static_method('value', a=2))
    self.assertEqual(1, len(calls))

  def test_require_decorator_on_class_method(self):
    """@require composes with @classmethod (applied below it)."""
    calls = []
    def checker():
      calls.append(1)
      return True
    class Class(object):
      @classmethod
      @api.require(checker)
      def class_method(cls, *args, **kwargs):
        return (cls, args, kwargs)
    self.assertEqual(
        (Class, ('value',), {'a': 2}), Class.class_method('value', a=2))
    self.assertEqual(1, len(calls))

  def test_require_decorator_ndb_nesting_require_first(self):
    """@require works when wrapping an ndb-decorated function."""
    calls = []
    def checker():
      calls.append(1)
      return True
    @api.require(checker)
    @ndb.non_transactional
    def func(*args, **kwargs):
      return (args, kwargs)
    self.assertEqual((('value',), {'a': 2}), func('value', a=2))
    self.assertEqual(1, len(calls))

  def test_require_decorator_ndb_nesting_require_last(self):
    """@require works when wrapped by an ndb decorator."""
    calls = []
    def checker():
      calls.append(1)
      return True
    @ndb.non_transactional
    @api.require(checker)
    def func(*args, **kwargs):
      return (args, kwargs)
    self.assertEqual((('value',), {'a': 2}), func('value', a=2))
    self.assertEqual(1, len(calls))

  def test_public_then_require_fails(self):
    """@public and @require are mutually exclusive (public outer)."""
    with self.assertRaises(TypeError):
      @api.public
      @api.require(lambda: True)
      def func():
        pass

  def test_require_then_public_fails(self):
    """@public and @require are mutually exclusive (require outer)."""
    with self.assertRaises(TypeError):
      @api.require(lambda: True)
      @api.public
      def func():
        pass

  def test_is_decorated(self):
    """is_decorated() detects both @public and @require decorations."""
    self.assertTrue(api.is_decorated(api.public(lambda: None)))
    self.assertTrue(
        api.is_decorated(api.require(lambda: True)(lambda: None)))

  @mock.patch('logging.info')
  def test_require_log_identity(self, logfunc):
    """With log_identity=True, @require logs the caller's identity."""
    ident = model.Identity.from_bytes('user:abc@example.com')
    api.get_request_cache().current_identity = ident
    @api.require(lambda: True, log_identity=True)
    def func():
      pass
    func()
    logfunc.assert_called_once_with('Accessed from user:abc@example.com')
class OAuthAccountsTest(test_case.TestCase):
  """Test for extract_oauth_caller_identity function."""

  def mock_all(self, user_email, client_id, allowed_client_ids=()):
    """Mocks oauth user, client_id and the AuthDB client-id whitelist."""
    class FakeUser(object):
      email = lambda _: user_email
    class FakeAuthDB(object):
      is_allowed_oauth_client_id = lambda _, cid: cid in allowed_client_ids
    self.mock(api.oauth, 'get_current_user', lambda _: FakeUser())
    self.mock(api.oauth, 'get_client_id', lambda _: client_id)
    self.mock(api, 'get_request_auth_db', FakeAuthDB)

  @staticmethod
  def user(email):
    # Shorthand for constructing a user identity in assertions.
    return model.Identity(model.IDENTITY_USER, email)

  def test_is_allowed_oauth_client_id_ok(self):
    self.mock_all('email@email.com', 'some-client-id', ['some-client-id'])
    self.assertEqual(
        (self.user('email@email.com'), api.new_auth_details()),
        api.extract_oauth_caller_identity())

  def test_is_allowed_oauth_client_id_not_ok(self):
    self.mock_all('email@email.com', 'some-client-id', ['another-client-id'])
    with self.assertRaises(api.AuthorizationError):
      api.extract_oauth_caller_identity()

  def test_is_allowed_oauth_client_id_not_ok_empty(self):
    self.mock_all('email@email.com', 'some-client-id')
    with self.assertRaises(api.AuthorizationError):
      api.extract_oauth_caller_identity()
class AuthWebUIConfigTest(test_case.TestCase):
  """Tests for get/set_web_client_id and its caching layer."""

  def test_works(self):
    utils.clear_cache(api.get_web_client_id)
    # Unset initially.
    self.assertEqual('', api.get_web_client_id_uncached())
    api.set_web_client_id('zzz')
    # Visible both through the uncached and the cached getter.
    self.assertEqual('zzz', api.get_web_client_id_uncached())
    self.assertEqual('zzz', api.get_web_client_id())
class AuthDBBuilder(object):
  """Fluent helper for assembling an AuthDB out of AuthGroup entities.

  Usage: AuthDBBuilder().group('g1', ...).group('g2', ...).build().
  """

  def __init__(self):
    self.groups = []

  def group(self, name, members=None, globs=None, nested=None, owners=None):
    """Appends one AuthGroup entity to the builder; returns self for chaining.

    Members and globs are given as serialized identity strings; owners falls
    back to 'default-owners-group' when not specified.
    """
    member_idents = [model.Identity.from_bytes(m) for m in (members or [])]
    glob_idents = [model.IdentityGlob.from_bytes(g) for g in (globs or [])]
    entity = model.AuthGroup(
        key=model.group_key(name),
        members=member_idents,
        globs=glob_idents,
        nested=nested or [],
        owners=owners or 'default-owners-group',
    )
    self.groups.append(entity)
    return self

  def build(self):
    """Materializes the accumulated groups into an AuthDB instance."""
    return new_auth_db(groups=self.groups)
class RelevantSubgraphTest(test_case.TestCase):
  """Tests for AuthDB.get_relevant_subgraph."""

  def call(self, db, principal):
    """Runs get_relevant_subgraph and flattens the result for assertions.

    'principal' is a group name, an identity string or a glob string; which
    one is detected from its shape ('*' => glob, '@' => identity).
    """
    if '*' in principal:
      principal = model.IdentityGlob.from_bytes(principal)
    elif '@' in principal:
      principal = model.Identity.from_bytes(principal)
    graph = db.get_relevant_subgraph(principal)
    # Use a dict with integer keys instead of a list to improve the readability
    # of assertions below.
    nodes = {}
    for i, (node, edges) in enumerate(graph.describe()):
      if isinstance(node, (model.Identity, model.IdentityGlob)):
        node = node.to_bytes()
      nodes[i] = (node, {l: sorted(s) for l, s in edges.items() if s})
    return nodes

  def test_empty(self):
    # With no groups at all, the subgraph is just the principal itself.
    db = AuthDBBuilder().build()
    self.assertEqual(
        {0: ('user:a@example.com', {})}, self.call(db, 'user:a@example.com'))
    self.assertEqual(
        {0: ('user:*@example.com', {})}, self.call(db, 'user:*@example.com'))
    self.assertEqual(
        {0: ('group', {})}, self.call(db, 'group'))

  def test_identity_discoverable_directly_and_through_glob(self):
    b = AuthDBBuilder()
    b.group('g1', ['user:a@example.com'])
    b.group('g2', ['user:b@example.com'])
    b.group('g3', [], ['user:*@example.com'])
    b.group('g4', ['user:a@example.com'], ['user:*'])
    self.assertEqual({
        0: ('user:a@example.com', {'IN': [1, 3, 4, 5]}),
        1: ('user:*@example.com', {'IN': [2]}),
        2: ('g3', {}),
        3: ('user:*', {'IN': [4]}),
        4: ('g4', {}),
        5: ('g1', {}),
    }, self.call(b.build(), 'user:a@example.com'))

  def test_glob_is_matched_directly(self):
    b = AuthDBBuilder()
    b.group('g1', [], ['user:*@example.com'])
    b.group('g2', [], ['user:*'])
    # Only the exact glob matches; 'user:*' is a different principal.
    self.assertEqual({
        0: ('user:*@example.com', {'IN': [1]}),
        1: ('g1', {}),
    }, self.call(b.build(), 'user:*@example.com'))

  def test_simple_group_lookup(self):
    b = AuthDBBuilder()
    b.group('g1', nested=['g2', 'g3'])
    b.group('g2', nested=['g3'])
    b.group('g3')
    self.assertEqual({
        0: ('g3', {'IN': [1, 2]}),
        1: ('g1', {}),
        2: ('g2', {'IN': [1]}),
    }, self.call(b.build(), 'g3'))

  def test_ownership_relations(self):
    b = AuthDBBuilder()
    b.group('a-root', nested=['b-inner'])
    b.group('b-inner')
    b.group('c-owned-by-root', owners='a-root')
    b.group('d-includes-owned-by-root', nested=['c-owned-by-root'])
    b.group('e-owned-by-3', owners='d-includes-owned-by-root')
    # OWNS edges are followed transitively from groups reachable via IN.
    self.assertEqual({
        0: ('b-inner', {'IN': [1]}),
        1: ('a-root', {'OWNS': [2]}),
        2: ('c-owned-by-root', {'IN': [3]}),
        3: ('d-includes-owned-by-root', {'OWNS': [4]}),
        4: ('e-owned-by-3', {}),
    }, self.call(b.build(), 'b-inner'))

  def test_diamond(self):
    # Diamond-shaped nesting: 'bottom' is visited once despite two paths.
    b = AuthDBBuilder()
    b.group('top', nested=['middle1', 'middle2'])
    b.group('middle1', nested=['bottom'])
    b.group('middle2', nested=['bottom'])
    b.group('bottom')
    self.assertEqual({
        0: ('bottom', {'IN': [1, 3]}),
        1: ('middle1', {'IN': [2]}),
        2: ('top', {}),
        3: ('middle2', {'IN': [2]}),
    }, self.call(b.build(), 'bottom'))

  def test_cycle(self):
    # Note: cycles in groups are forbidden on API layer, but make sure we still
    # handle them without hanging in case something unexpected happens and they
    # appear.
    b = AuthDBBuilder()
    b.group('g1', nested=['g2'])
    b.group('g2', nested=['g1', 'g2'])
    self.assertEqual({
        0: ('g2', {'IN': [0, 1]}),
        1: ('g1', {'IN': [0]}),
    }, self.call(b.build(), 'g2'))

  def test_selfowners(self):
    # A group that owns itself produces a self-referencing OWNS edge.
    b = AuthDBBuilder()
    b.group('g1', nested=['g2'], owners='g1')
    b.group('g2')
    self.assertEqual({0: ('g1', {'OWNS': [0]})}, self.call(b.build(), 'g1'))
    self.assertEqual({
        0: ('g2', {'IN': [1]}),
        1: ('g1', {'OWNS': [1]}),
    }, self.call(b.build(), 'g2'))

  def test_messy_graph(self):
    b = AuthDBBuilder()
    b.group('directly', ['user:a@example.com'])
    b.group('via-glob', [], ['user:*@example.com'])
    b.group('g1', nested=['via-glob'], owners='g2')
    b.group('g2', nested=['directly'])
    b.group('g3', nested=['g1'])
    self.assertEqual({
        0: ('user:a@example.com', {'IN': [1, 5]}),
        1: ('user:*@example.com', {'IN': [2]}),
        2: ('via-glob', {'IN': [3]}),
        3: ('g1', {'IN': [4]}),
        4: ('g3', {}),
        5: ('directly', {'IN': [6]}),
        6: ('g2', {'OWNS': [3]}),
    }, self.call(b.build(), 'user:a@example.com'))
class PermissionsTest(test_case.TestCase):
  """Tests for the api.Permission interned-tuple type."""

  def test_happy_path(self):
    p1 = api.Permission('service.subject.verb')
    p2 = api.Permission('service.subject.verb')
    p3 = api.Permission('service.subject.another')
    self.assertEqual(p1, p2)
    # Permissions with the same name are interned: same object identity.
    self.assertTrue(p1 is p2)
    self.assertNotEqual(p1, p3)
    self.assertEqual('service.subject.verb', str(p1))
    self.assertEqual("'service.subject.verb'", '%r' % (p1,))

  def test_validation_errors(self):
    # Names must be str (not int, not unicode in Python 2).
    with self.assertRaises(TypeError):
      api.Permission(123)
    with self.assertRaises(TypeError):
      api.Permission(u'no.unicode.here')
    # Names must be exactly three non-empty dot-separated components.
    with self.assertRaises(ValueError):
      api.Permission('too.few')
    with self.assertRaises(ValueError):
      api.Permission('too.too.too.much')
    with self.assertRaises(ValueError):
      api.Permission('has..empty')
class RealmStringsTest(test_case.TestCase):
  """Tests for realm name helpers: root_realm, legacy_realm, validation."""

  def test_happy_path(self):
    # Both str and unicode project names are accepted.
    self.assertEqual(api.root_realm('proj'), 'proj:@root')
    self.assertEqual(api.root_realm(u'proj'), 'proj:@root')
    self.assertEqual(api.legacy_realm('proj'), 'proj:@legacy')
    self.assertEqual(api.legacy_realm(u'proj'), 'proj:@legacy')

  def test_validation_errors(self):
    with self.assertRaises(TypeError):
      api.root_realm(None)
    with self.assertRaises(TypeError):
      api.legacy_realm(None)
    with self.assertRaises(ValueError):
      api.root_realm('')
    with self.assertRaises(ValueError):
      api.legacy_realm('')

  def test_validate_realm_name(self):
    # Valid names: '<project>:<realm>' with special @root/@legacy realms and
    # the special '@internal' project.
    self.assertIsNone(api.validate_realm_name('proj:realm'))
    self.assertIsNone(api.validate_realm_name('proj:@root'))
    self.assertIsNone(api.validate_realm_name('proj:@legacy'))
    self.assertIsNone(api.validate_realm_name('@internal:realm'))
    self.assertIsNone(api.validate_realm_name('@internal:@root'))
    self.assertIsNone(api.validate_realm_name('@internal:@legacy'))

  def test_validate_realm_name_errors(self):
    with self.assertRaises(ValueError):
      self.assertFalse(api.validate_realm_name('realm'))
    with self.assertRaises(ValueError):
      self.assertFalse(api.validate_realm_name('proj:@invalid'))
    with self.assertRaises(ValueError):
      self.assertFalse(api.validate_realm_name('proj:re:alm'))
    with self.assertRaises(ValueError):
      self.assertFalse(api.validate_realm_name('@proj:realm'))
# Permissions and identities shared by the realms tests below. ALL_PERMS is
# also used by RealmsTest.auth_db to map a Permission to its index in the
# serialized AuthDB 'permissions' list.
PERM0 = api.Permission('luci.dev.testing0')
PERM1 = api.Permission('luci.dev.testing1')
PERM2 = api.Permission('luci.dev.testing2')
ALL_PERMS = [PERM0, PERM1, PERM2]
ID1 = model.Identity.from_bytes('user:1@example.com')
ID2 = model.Identity.from_bytes('user:2@example.com')
ID3 = model.Identity.from_bytes('user:3@example.com')
ADMIN = model.Identity.from_bytes('user:admin@example.com')
class RealmsTest(test_case.TestCase):
  """Tests for AuthDB.has_permission and related realms APIs."""

  @staticmethod
  def auth_db(realms_map, groups=None, api_version=None):
    """Builds an AuthDB from a {realm name => {perms tuple => principals}} map.

    Principals may be given as 'group:...' strings or Identity objects.
    Permissions are referenced by their index in ALL_PERMS, matching the
    serialized proto representation.
    """
    return api.AuthDB.from_proto(
        replication_state=model.AuthReplicationState(),
        auth_db=replication_pb2.AuthDB(
            groups=[
                {
                    'name': name,
                    'members': [m.to_bytes() for m in members],
                    'created_by': 'user:zzz@example.com',
                    'modified_by': 'user:zzz@example.com',
                } for name, members in (groups or {}).items()
            ],
            realms={
                'api_version': api_version or realms.API_VERSION,
                'permissions': [
                    {'name': p.name} for p in ALL_PERMS
                ],
                'realms': [
                    {
                        'name': name,
                        'bindings': [
                            {
                                'permissions': [
                                    ALL_PERMS.index(p)
                                    for p in perms
                                ],
                                'principals': [
                                    p if isinstance(p, str) else p.to_bytes()
                                    for p in principals
                                ],
                            } for perms, principals in sorted(bindings.items())
                        ],
                        'data': {
                            'enforce_in_service': ['data for %s' % name],
                        },
                    } for name, bindings in sorted(realms_map.items())
                ],
            },
        ),
        additional_client_ids=[])

  def setUp(self):
    super(RealmsTest, self).setUp()
    # Register the test permissions with the api module.
    self.all_perms = {p.name: p for p in ALL_PERMS}
    self.mock(api, '_all_perms', self.all_perms)
    # Capture log output per level for assert_logs/assert_logs_empty.
    self.logs = {}
    for lvl in ('info', 'warning', 'error', 'exception'):
      self.logs[lvl] = []
      def appender(lvl): # need to capture lvl in a separate closure
        return lambda msg, *args: self.logs[lvl].append(msg % args)
      self.mock(api.logging, lvl, appender(lvl))

  def assert_logs_empty(self, lvl):
    self.assertEqual([], self.logs[lvl])

  def assert_logs(self, lvl, msg):
    # Passes if 'msg' is a substring of any captured log line at 'lvl'.
    self.assertTrue(
        any(msg in m for m in self.logs[lvl]),
        '%r not in %r' % (msg, self.logs[lvl]))

  def assert_check(self, db, perm, realms, ident, outcome):
    # Asserts has_permission(...) == outcome with a readable failure message.
    self.assertEqual(
        outcome, db.has_permission(perm, realms, ident),
        'has_permission(%r, %r, %r) is %s, but should be %s' %
        (perm, realms, ident.to_bytes(), not outcome, outcome))

  def test_direct_inclusion_in_binding(self):
    db = self.auth_db({
        'proj:@root': {},
        'proj:realm': {
            (PERM0, PERM1): [ID1],
            (PERM0, PERM2): [ID2],
        },
        'proj:another/realm': {
            (PERM2,): [ID1, ID3],
        },
    })
    self.assert_check(db, PERM0, ['proj:realm'], ID1, True)
    self.assert_check(db, PERM1, ['proj:realm'], ID1, True)
    self.assert_check(db, PERM2, ['proj:realm'], ID1, False)
    self.assert_check(db, PERM0, ['proj:realm'], ID2, True)
    self.assert_check(db, PERM1, ['proj:realm'], ID2, False)
    self.assert_check(db, PERM2, ['proj:realm'], ID2, True)
    # A permission granted in any of the listed realms is enough.
    self.assert_check(
        db, PERM2, ['proj:realm', 'proj:another/realm'], ID1, True)
    self.assert_check(
        db, PERM2, ['proj:realm', 'proj:another/realm'], ID3, True)

  def test_inclusion_through_group(self):
    db = self.auth_db({
        'proj:@root': {},
        'proj:realm': {
            (PERM0, PERM1): ['group:empty', 'group:g1'],
            (PERM0, PERM2): ['group:empty', 'group:g2'],
        },
    }, groups={'empty': [], 'g1': [ID1], 'g2': [ID2]})
    self.assert_check(db, PERM0, ['proj:realm'], ID1, True)
    self.assert_check(db, PERM1, ['proj:realm'], ID1, True)
    self.assert_check(db, PERM2, ['proj:realm'], ID1, False)
    self.assert_check(db, PERM0, ['proj:realm'], ID2, True)
    self.assert_check(db, PERM1, ['proj:realm'], ID2, False)
    self.assert_check(db, PERM2, ['proj:realm'], ID2, True)

  def test_fallback_to_root(self):
    # A missing realm falls back to the project's @root realm (with a warning).
    db = self.auth_db({'proj:@root': {(PERM0,): [ID1]}})
    self.assert_check(db, PERM0, ['proj:@root'], ID1, True)
    self.assert_check(db, PERM0, ['proj:@root'], ID2, False)
    self.assert_logs_empty('warning')
    self.assert_check(db, PERM0, ['proj:realm'], ID1, True)
    self.assert_logs('warning', 'falling back to the root')
    self.assert_check(db, PERM0, ['proj:realm'], ID2, False)
    self.assert_check(db, PERM0, ['proj:another/realm'], ID1, True)

  def test_missing_project(self):
    # Checks against a project absent from the AuthDB deny with a warning.
    db = self.auth_db({})
    self.assert_check(db, PERM0, ['proj:@root'], ID1, False)
    self.assert_logs('warning', 'a non-existing root realm')
    self.logs['warning'] = []
    self.assert_check(db, PERM0, ['proj:@legacy'], ID1, False)
    self.assert_logs('warning', 'doesn\'t have a root realm')
    self.logs['warning'] = []
    self.assert_check(db, PERM0, ['proj:another/realm'], ID1, False)
    self.assert_logs('warning', 'doesn\'t have a root realm')
    self.logs['warning'] = []

  def test_unknown_permission(self):
    # A permission registered in the process but absent from the AuthDB
    # results in a deny plus a warning, not an exception.
    unknown = api.Permission('luci.dev.unknown')
    self.all_perms[unknown.name] = unknown
    db = self.auth_db({'proj:realm': {(PERM0,): [ID1]}})
    self.assert_logs('warning', 'is not in the AuthDB')
    self.assert_check(db, unknown, ['proj:realm'], ID1, False)
    self.assert_logs('warning', 'not present in the AuthDB')

  def test_realms_unavailable(self):
    # An AuthDB built without realms data raises RealmsError on any check.
    empty = new_auth_db()
    with self.assertRaises(api.RealmsError):
      empty.has_permission('luci.dev.p1', ['proj:realm'], ID1)

  def test_bad_api_version(self):
    with self.assertRaises(api.RealmsError):
      self.auth_db({}, api_version=666)

  def test_bad_permission_type(self):
    # has_permission requires a Permission object, not a plain string.
    db = self.auth_db({})
    with self.assertRaises(TypeError):
      db.has_permission('luci.dev.p1', ['proj:realm'], ID1)

  def test_bad_realm_names(self):
    db = self.auth_db({})
    for r in ['z', ':z', 'p:', 'blah blah:z', 'p:BLAH', 'p:@z', 'p:p:z']:
      with self.assertRaises(ValueError):
        db.has_permission(PERM0, [r], ID1)

  def test_has_permission_dryrun(self):
    rc = api.RequestCache()
    rc._auth_db = self.auth_db(
        {'proj:@root': {(PERM0,): [ID1]}}, groups={'admin': [ADMIN]})
    self.mock(api, 'get_request_cache', lambda: rc)
    # Match.
    self.logs['info'] = []
    api.has_permission_dryrun(PERM0, ['proj:@root'], True, ID1, 'admin', 'bug')
    self.assert_logs('info',
        "bug: has_permission_dryrun('luci.dev.testing0', ['proj:@root'], "
        "'user:1@example.com'), authdb=0: match - ALLOW")
    self.logs['info'] = []
    api.has_permission_dryrun(PERM1, ['proj:@root'], False, ID1, 'admin', 'bug')
    self.assert_logs('info',
        "bug: has_permission_dryrun('luci.dev.testing1', ['proj:@root'], "
        "'user:1@example.com'), authdb=0: match - DENY")
    # Mismatch.
    self.logs['warning'] = []
    api.has_permission_dryrun(PERM0, ['proj:@root'], False, ID1, 'admin', 'bug')
    self.assert_logs('warning',
        "bug: has_permission_dryrun('luci.dev.testing0', ['proj:@root'], "
        "'user:1@example.com'), authdb=0: mismatch - got ALLOW, want DENY")
    self.logs['warning'] = []
    api.has_permission_dryrun(PERM1, ['proj:@root'], True, ID1, 'admin', 'bug')
    self.assert_logs('warning',
        "bug: has_permission_dryrun('luci.dev.testing1', ['proj:@root'], "
        "'user:1@example.com'), authdb=0: mismatch - got DENY, want ALLOW")
    # Admin match.
    self.logs['info'] = []
    api.has_permission_dryrun(
        PERM0, ['proj:@root'], True, ADMIN, 'admin', 'bug')
    self.assert_logs('info',
        "bug: has_permission_dryrun('luci.dev.testing0', ['proj:@root'], "
        "'user:admin@example.com'), authdb=0: match - ADMIN_ALLOW")
    # Blow up.
    self.logs['exception'] = []
    api.has_permission_dryrun(PERM1, ['@root'], True, ID1, 'admin', 'bug')
    self.assert_logs('exception',
        "bug: has_permission_dryrun('luci.dev.testing1', ['@root'], "
        "'user:1@example.com'), authdb=0: exception ValueError, want ALLOW")

  def test_realm_data(self):
    # get_realm_data falls back to the root realm for unknown realms and
    # returns None for unknown projects.
    db = self.auth_db({'proj:@root': {}, 'proj:r': {}})
    def realm_data(realm):
      r = db.get_realm_data(realm)
      return r.enforce_in_service[0] if r else None
    self.assertEqual('data for proj:r', realm_data('proj:r'))
    self.assertEqual('data for proj:@root', realm_data('proj:@root'))
    self.assertEqual('data for proj:@root', realm_data('proj:zzz'))
    self.assertEqual(None, realm_data('zzz:@root'))
if __name__ == '__main__':
  # In verbose mode show full diffs in assertion failure messages.
  if '-v' in sys.argv:
    unittest.TestCase.maxDiff = None
  unittest.main()
# (removed: stray dataset-extraction metadata row, not part of the original file)
# pylint: disable=W0212,W0612,W0613
# pylint: disable=redefined-outer-name
import datetime
import sys
import threading
import unittest
from six.moves import queue
import mock
from test_support import test_env
test_env.setup_test_env()
from google.appengine.ext import ndb
from components.auth import api
from components.auth import config
from components.auth import ipaddr
from components.auth import model
from components.auth import realms
from components.auth import replication
from components.auth.proto import replication_pb2
from components.auth.proto import security_config_pb2
from components import utils
from test_support import test_case
def new_auth_db(
    replication_state=None,
    global_config=None,
    groups=None,
    ip_whitelist_assignments=None,
    ip_whitelists=None,
    internal_service_regexp=None,
    additional_client_ids=None
):
  """Constructs an api.AuthDB from entities, filling in sensible defaults.

  Always (re)serializes a SecurityConfig blob into the global config, built
  from 'internal_service_regexp' (or the default test regexp).
  """
  cfg = global_config or model.AuthGlobalConfig()
  cfg.security_config = security_config_blob(internal_service_regexp)
  assignments = (
      ip_whitelist_assignments or model.AuthIPWhitelistAssignments())
  return api.AuthDB.from_entities(
      replication_state=replication_state or model.AuthReplicationState(),
      global_config=cfg,
      groups=groups or [],
      ip_whitelist_assignments=assignments,
      ip_whitelists=ip_whitelists or [],
      additional_client_ids=additional_client_ids or [])
def security_config_blob(regexps=None):
  """Returns a serialized SecurityConfig proto with given internal regexps."""
  if not regexps:
    regexps = ['(.*-dot-)?internal\\.example\\.com']
  cfg = security_config_pb2.SecurityConfig(internal_service_regexp=regexps)
  return cfg.SerializeToString()
class AuthDBTest(test_case.TestCase):
  def setUp(self):
    super(AuthDBTest, self).setUp()
    # Silence auth library log output for the duration of each test.
    self.mock(api.logging, 'warning', lambda *_args: None)
    self.mock(api.logging, 'error', lambda *_args: None)
  def test_get_group(self):
    """AuthDB.get_group returns a normalized copy of a stored group."""
    g = model.AuthGroup(
        key=model.group_key('group'),
        members=[
          model.Identity.from_bytes('user:b@example.com'),
          model.Identity.from_bytes('user:a@example.com'),
        ],
        globs=[model.IdentityGlob.from_bytes('user:*')],
        nested=['blah'],
        created_by=model.Identity.from_bytes('user:x@example.com'),
        created_ts=datetime.datetime(2014, 1, 2, 3, 4, 5),
        modified_by=model.Identity.from_bytes('user:y@example.com'),
        modified_ts=datetime.datetime(2015, 1, 2, 3, 4, 5))
    db = new_auth_db(groups=[g])
    # Unknown group.
    self.assertIsNone(db.get_group('blah'))
    # Known group.
    from_cache = db.get_group('group')
    self.assertEqual(from_cache.key, g.key)
    # Members list is sorted.
    self.assertEqual(from_cache.members, [
      model.Identity.from_bytes('user:a@example.com'),
      model.Identity.from_bytes('user:b@example.com'),
    ])
    # Fields that are known to be different.
    exclude = ['members', 'auth_db_rev', 'auth_db_prev_rev']
    self.assertEqual(
        from_cache.to_dict(exclude=exclude),
        g.to_dict(exclude=exclude))
  def test_is_group_member(self):
    """Covers wildcard, unknown, glob-based, listed and nested membership."""
    # Test identity.
    joe = model.Identity(model.IDENTITY_USER, 'joe@example.com')
    # Group that includes joe via glob.
    with_glob = model.AuthGroup(id='WithGlob')
    with_glob.globs.append(
        model.IdentityGlob(model.IDENTITY_USER, '*@example.com'))
    # Group that includes joe via explicit listing.
    with_listing = model.AuthGroup(id='WithListing')
    with_listing.members.append(joe)
    # Group that includes joe via nested group.
    with_nesting = model.AuthGroup(id='WithNesting')
    with_nesting.nested.append('WithListing')
    # Creates AuthDB with given list of groups and then runs the check.
    is_member = (lambda groups, ident, group:
        new_auth_db(groups=groups).is_group_member(group, ident))
    # Wildcard group includes everyone (even anonymous).
    self.assertTrue(is_member([], joe, '*'))
    self.assertTrue(is_member([], model.Anonymous, '*'))
    # An unknown group includes nobody.
    self.assertFalse(is_member([], joe, 'Missing'))
    self.assertFalse(is_member([], model.Anonymous, 'Missing'))
    # Globs are respected.
    self.assertTrue(is_member([with_glob], joe, 'WithGlob'))
    self.assertFalse(is_member([with_glob], model.Anonymous, 'WithGlob'))
    # Members lists are respected.
    self.assertTrue(is_member([with_listing], joe, 'WithListing'))
    self.assertFalse(is_member([with_listing], model.Anonymous, 'WithListing'))
    # Nested groups are respected.
    self.assertTrue(is_member([with_nesting, with_listing], joe, 'WithNesting'))
    self.assertFalse(
        is_member([with_nesting, with_listing], model.Anonymous, 'WithNesting'))
  def test_list_group(self):
    """AuthDB.list_group expands (optionally recursive) group listings."""
    def list_group(groups, group, recursive):
      # Sort the listing fields so assertions are order-independent.
      l = new_auth_db(groups=groups).list_group(group, recursive)
      return api.GroupListing(
          sorted(l.members), sorted(l.globs), sorted(l.nested))
    grp_1 = model.AuthGroup(id='1')
    grp_1.members.extend([
        model.Identity(model.IDENTITY_USER, 'a@example.com'),
        model.Identity(model.IDENTITY_USER, 'b@example.com'),
    ])
    grp_1.globs.extend([
        model.IdentityGlob(model.IDENTITY_USER, '*@a.example.com'),
        model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'),
    ])
    grp_2 = model.AuthGroup(id='2')
    grp_2.nested.append('1')
    grp_2.members.extend([
        # Specify 'b' again, even though it's in a nested group.
        model.Identity(model.IDENTITY_USER, 'b@example.com'),
        model.Identity(model.IDENTITY_USER, 'c@example.com'),
    ])
    grp_2.globs.extend([
        model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'),
        model.IdentityGlob(model.IDENTITY_USER, '*@c.example.com'),
    ])
    # Unknown group.
    empty = api.GroupListing([], [], [])
    self.assertEqual(empty, list_group([grp_1, grp_2], 'blah', False))
    self.assertEqual(empty, list_group([grp_1, grp_2], 'blah', True))
    # Non recursive.
    expected = api.GroupListing(
        members=[
            model.Identity(model.IDENTITY_USER, 'b@example.com'),
            model.Identity(model.IDENTITY_USER, 'c@example.com'),
        ],
        globs=[
            model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'),
            model.IdentityGlob(model.IDENTITY_USER, '*@c.example.com'),
        ],
        nested=['1'])
    self.assertEqual(expected, list_group([grp_1, grp_2], '2', False))
    # Recursive.
    expected = api.GroupListing(
        members=[
            model.Identity(model.IDENTITY_USER, 'a@example.com'),
            model.Identity(model.IDENTITY_USER, 'b@example.com'),
            model.Identity(model.IDENTITY_USER, 'c@example.com'),
        ],
        globs=[
            model.IdentityGlob(model.IDENTITY_USER, '*@a.example.com'),
            model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'),
            model.IdentityGlob(model.IDENTITY_USER, '*@c.example.com'),
        ],
        nested=['1'])
    self.assertEqual(expected, list_group([grp_1, grp_2], '2', True))
  def test_nested_groups_cycle(self):
    """A group-nesting cycle is detected and logged instead of hanging."""
    # Groups that nest each other.
    group1 = model.AuthGroup(id='Group1')
    group1.nested.append('Group2')
    group2 = model.AuthGroup(id='Group2')
    group2.nested.append('Group1')
    # Collect warnings.
    warnings = []
    self.mock(api.logging, 'warning', lambda msg, *_args: warnings.append(msg))
    # This should not hang, but produce error message.
    auth_db = new_auth_db(groups=[group1, group2])
    self.assertFalse(
        auth_db.is_group_member('Group1', model.Anonymous))
    self.assertEqual(1, len(warnings))
    self.assertTrue('Cycle in a group graph' in warnings[0])
  def test_not_real_nested_group_cycle_aka_issue_251(self):
    """Revisiting a group over a diamond path must not be reported as a cycle."""
    # See https://github.com/luci/luci-py/issues/251.
    #
    # B -> A, C -> [B, A]. When traversing C, A is seen twice, and this is fine.
    group_A = model.AuthGroup(id='A')
    group_B = model.AuthGroup(id='B')
    group_C = model.AuthGroup(id='C')
    group_B.nested = ['A']
    group_C.nested = ['A', 'B']
    db = new_auth_db(groups=[group_A, group_B, group_C])
    # 'is_group_member' must not report 'Cycle in a group graph' warning.
    warnings = []
    self.mock(api.logging, 'warning', lambda msg, *_args: warnings.append(msg))
    self.assertFalse(db.is_group_member('C', model.Anonymous))
    self.assertFalse(warnings)
  def test_is_allowed_oauth_client_id(self):
    """Accepts the primary, additional, local and API Explorer client IDs."""
    global_config = model.AuthGlobalConfig(
        oauth_client_id='1',
        oauth_additional_client_ids=['2', '3'])
    auth_db = new_auth_db(
        global_config=global_config,
        additional_client_ids=['local'])
    self.assertFalse(auth_db.is_allowed_oauth_client_id(None))
    self.assertTrue(auth_db.is_allowed_oauth_client_id('1'))
    self.assertTrue(auth_db.is_allowed_oauth_client_id('2'))
    self.assertTrue(auth_db.is_allowed_oauth_client_id('3'))
    self.assertTrue(auth_db.is_allowed_oauth_client_id('local'))
    self.assertTrue(
        auth_db.is_allowed_oauth_client_id(api.API_EXPLORER_CLIENT_ID))
    self.assertFalse(auth_db.is_allowed_oauth_client_id('4'))
  def test_fetch_auth_db_lazy_bootstrap(self):
    """fetch_auth_db() lazily creates the root model entities once."""
    # Don't exist before the call.
    self.assertFalse(model.root_key().get())
    api._lazy_bootstrap_ran = False
    api.fetch_auth_db()
    self.assertTrue(model.root_key().get())
    # Deleting the root entity and fetching again must not recreate it,
    # since the lazy bootstrap already ran in this process.
    model.root_key().delete()
    api.fetch_auth_db()
  def run_auth_db_fetch_test(self, setup_cb):
    """Populates the datastore, runs fetch_auth_db() and verifies its content.

    'setup_cb', if not None, is called after all entities are stored and
    before fetch_auth_db(); it lets a caller transform the stored state (e.g.
    switch into Replica mode). Returns the fetched AuthDB for further checks.
    """
    now = utils.utcnow()
    ident = model.Identity.from_bytes('user:a@example.com')
    # Client IDs callback. Disable config.ensure_configured() since it overrides
    # _additional_client_ids_cb after we mock it.
    self.mock(config, 'ensure_configured', lambda: None)
    self.mock(api, '_additional_client_ids_cb', lambda: ['', 'cb_client_id'])
    self.mock(api, 'get_web_client_id', lambda: 'web_client_id')
    # Create AuthGlobalConfig.
    global_config = model.AuthGlobalConfig(key=model.root_key())
    global_config.oauth_client_id = '1'
    global_config.oauth_client_secret = 'secret'
    global_config.oauth_additional_client_ids = ['2', '3']
    global_config.security_config = security_config_blob()
    global_config.token_server_url = 'token_server_url'
    global_config.put()
    # What we expect to see in the AuthDB.
    expected_groups = {}
    def add_group(name, members, globs, nested, owners):
      # Record the expectation and store the matching entity.
      expected_groups[name] = (
          frozenset(members),
          tuple(model.IdentityGlob.from_bytes(g) for g in globs),
          tuple(nested),
          owners,
      )
      model.AuthGroup(
          key=model.group_key(name),
          members=[model.Identity.from_bytes(m) for m in members],
          globs=[model.IdentityGlob.from_bytes(g) for g in globs],
          nested=nested,
          owners=owners,
          created_ts=now,
          created_by=ident,
          modified_ts=now,
          modified_by=ident,
      ).put()
    # Create a bunch of groups.
    add_group(
        name='Group A',
        members=['user:a@example.com', 'user:b@example.com'],
        globs=['user:*@example.com'],
        nested=['Group B', 'Group C'],
        owners='Group A')
    add_group(
        name='Group B',
        members=['user:c@example.com'],
        globs=['user:*@example.com'],
        nested=[],
        owners='Group A')
    add_group(
        name='Group C',
        members=[],
        globs=[],
        nested=[],
        owners='Group C')
    # And a bunch of IP whitelists.
    model.AuthIPWhitelistAssignments(
        key=model.ip_whitelist_assignments_key(),
        assignments=[
            model.AuthIPWhitelistAssignments.Assignment(
                identity=model.Anonymous,
                ip_whitelist='some ip whitelist',
                created_ts=now,
                created_by=ident,
                comment='comment',
            ),
        ],
    ).put()
    model.AuthIPWhitelist(
        key=model.ip_whitelist_key('some ip whitelist'),
        subnets=['127.0.0.1/32'],
        description='description',
        created_ts=now,
        created_by=ident,
        modified_ts=now,
        modified_by=ident,
    ).put()
    model.AuthIPWhitelist(
        key=model.ip_whitelist_key('bots'),
        subnets=['127.0.0.1/32'],
        description='description',
        created_ts=now,
        created_by=ident,
        modified_ts=now,
        modified_by=ident,
    ).put()
    if setup_cb:
      setup_cb()
    # Verify all the stuff above ends up in the auth_db.
    auth_db = api.fetch_auth_db()
    # global_config and additional_client_ids_cb
    self.assertEqual('token_server_url', auth_db.token_server_url)
    self.assertEqual(('1', 'secret', ['2', '3']), auth_db.get_oauth_config())
    self.assertTrue(auth_db.is_allowed_oauth_client_id('1'))
    self.assertTrue(auth_db.is_allowed_oauth_client_id('cb_client_id'))
    self.assertTrue(auth_db.is_allowed_oauth_client_id('web_client_id'))
    self.assertFalse(auth_db.is_allowed_oauth_client_id(''))
    # Groups.
    self.assertEqual(
        expected_groups,
        {
            name: (g.members, g.globs, g.nested, g.owners)
            for name, g in auth_db._groups.items()
        })
    # IP whitelists and whitelist assignments.
    self.assertEqual(
        {model.Anonymous: 'some ip whitelist'},
        auth_db._ip_whitelist_assignments)
    self.assertEqual(
        {'bots': ['127.0.0.1/32'], 'some ip whitelist': ['127.0.0.1/32']},
        auth_db._ip_whitelists)
    return auth_db
  def test_fetch_auth_db_from_entities(self):
    # With no snapshot stored, the AuthDB is built from "detailed" entities.
    auth_db = self.run_auth_db_fetch_test(None)
    self.assertEqual('from_entities', auth_db._from_what)
  def test_fetch_auth_db_from_snapshot(self):
    """In Replica mode the AuthDB is loaded from the sharded proto snapshot."""
    PRIMARY_ID = 'primary_id'
    PRIMARY_URL = 'https://primary_url'
    AUTH_DB_REV = 12345
    def setup_snapshot():
      # Create AuthDB snapshot entities from existing "detailed" entities in
      # the datastore.
      _, snap = replication.new_auth_db_snapshot()
      # Switch into Replica mode, store the snapshot.
      model.AuthReplicationState(
          key=model.replication_state_key(),
          primary_id=PRIMARY_ID,
          primary_url=PRIMARY_URL,
          auth_db_rev=AUTH_DB_REV,
          shard_ids=replication.store_sharded_auth_db(
              auth_db=replication.auth_db_snapshot_to_proto(snap),
              primary_url=PRIMARY_URL,
              auth_db_rev=AUTH_DB_REV,
              shard_size=100,
          ),
      ).put()
    auth_db = self.run_auth_db_fetch_test(setup_snapshot)
    self.assertEqual('from_proto', auth_db._from_what)
    self.assertEqual(PRIMARY_ID, auth_db.primary_id)
    self.assertEqual(PRIMARY_URL, auth_db.primary_url)
    self.assertEqual(AUTH_DB_REV, auth_db.auth_db_rev)
  def test_get_secret_bootstrap(self):
    """AuthDB.get_secret bootstraps a missing secret exactly once."""
    # Mock AuthSecret.bootstrap to capture calls to it.
    original = api.model.AuthSecret.bootstrap
    calls = []
    @classmethod
    def mocked_bootstrap(cls, name):
      calls.append(name)
      result = original(name)
      result.values = ['123']
      return result
    self.mock(api.model.AuthSecret, 'bootstrap', mocked_bootstrap)
    auth_db = new_auth_db()
    got = auth_db.get_secret(api.SecretKey('some_secret'))
    self.assertEqual(['123'], got)
    self.assertEqual(['some_secret'], calls)
def test_is_in_ip_whitelist(self):
auth_db = new_auth_db(ip_whitelists=[
model.AuthIPWhitelist(
key=model.ip_whitelist_key('l'),
subnets=['127.0.0.1', '192.168.0.0/24']),
])
test = lambda ip: auth_db.is_in_ip_whitelist('l', ipaddr.ip_from_string(ip))
self.assertTrue(test('127.0.0.1'))
self.assertTrue(test('192.168.0.0'))
self.assertTrue(test('192.168.0.9'))
self.assertTrue(test('192.168.0.255'))
self.assertFalse(test('192.168.1.0'))
self.assertFalse(test('192.1.0.0'))
@staticmethod
def make_auth_db_with_ip_whitelist():
return new_auth_db(
ip_whitelists=[
model.AuthIPWhitelist(
key=model.ip_whitelist_key('some ip whitelist'),
subnets=['127.0.0.1/32'],
),
model.AuthIPWhitelist(
key=model.ip_whitelist_key('bots'),
subnets=['192.168.1.1/32', '::1/32'],
),
],
ip_whitelist_assignments=model.AuthIPWhitelistAssignments(
assignments=[
model.AuthIPWhitelistAssignments.Assignment(
identity=model.Identity(model.IDENTITY_USER, 'a@example.com'),
ip_whitelist='some ip whitelist',)
],
),
)
def test_verify_ip_whitelisted_ok(self):
# Should not raise: IP is whitelisted.
ident = model.Identity(model.IDENTITY_USER, 'a@example.com')
self.make_auth_db_with_ip_whitelist().verify_ip_whitelisted(
ident, ipaddr.ip_from_string('127.0.0.1'))
def test_verify_ip_whitelisted_not_whitelisted(self):
with self.assertRaises(api.AuthorizationError):
self.make_auth_db_with_ip_whitelist().verify_ip_whitelisted(
model.Identity(model.IDENTITY_USER, 'a@example.com'),
ipaddr.ip_from_string('192.168.0.100'))
def test_verify_ip_whitelisted_not_assigned(self):
# Should not raise: whitelist is not required for another_user@example.com.
ident = model.Identity(model.IDENTITY_USER, 'another_user@example.com')
self.make_auth_db_with_ip_whitelist().verify_ip_whitelisted(
ident, ipaddr.ip_from_string('192.168.0.100'))
def test_verify_ip_whitelisted_missing_whitelist(self):
auth_db = new_auth_db(
ip_whitelist_assignments=model.AuthIPWhitelistAssignments(
assignments=[
model.AuthIPWhitelistAssignments.Assignment(
identity=model.Identity(model.IDENTITY_USER, 'a@example.com'),
ip_whitelist='missing ip whitelist',)
],
),
)
with self.assertRaises(api.AuthorizationError):
auth_db.verify_ip_whitelisted(
model.Identity(model.IDENTITY_USER, 'a@example.com'),
ipaddr.ip_from_string('127.0.0.1'))
def test_is_internal_domain(self):
auth_db = new_auth_db(internal_service_regexp=[
'(.*-dot-)?a-int\\.example\\.com',
'(.*-dot-)?b-int\\.example\\.com',
])
self.assertTrue(auth_db.is_internal_domain('a-int.example.com'))
self.assertTrue(auth_db.is_internal_domain('b-int.example.com'))
self.assertTrue(auth_db.is_internal_domain('z-dot-a-int.example.com'))
self.assertTrue(auth_db.is_internal_domain('z-dot-b-int.example.com'))
self.assertFalse(auth_db.is_internal_domain('int.example.com'))
self.assertFalse(auth_db.is_internal_domain('a-int.example'))
self.assertFalse(auth_db.is_internal_domain('dot-a-int.example.com'))
def mock_replication_state(auth_db_rev):
  """Returns an AuthReplicationState entity pinned at the given revision."""
  state = model.AuthReplicationState(
      key=model.replication_state_key(),
      primary_id='primary-id',
      auth_db_rev=auth_db_rev)
  return state
class TestAuthDBCache(test_case.TestCase):
  """Tests for process-global and request-local AuthDB caching in api."""

  def setUp(self):
    super(TestAuthDBCache, self).setUp()
    api.reset_local_state()

  def set_time(self, ts):
    """Mocks the wall clock used by the process cache expiration check."""
    self.mock(api.time, 'time', lambda: ts)

  def set_fetched_auth_db(self, auth_db):
    """Mocks api.fetch_auth_db to return |auth_db| (reusing a fresh known one)."""
    def mock_fetch_auth_db(known_auth_db=None):
      if (known_auth_db is not None and
          auth_db.auth_db_rev == known_auth_db.auth_db_rev):
        return known_auth_db
      return auth_db
    self.mock(api, 'fetch_auth_db', mock_fetch_auth_db)

  def test_get_request_cache_different_threads(self):
    """Each request thread gets its own RequestCache instance."""
    # Runs in its own thread.
    def thread_proc():
      request_cache = api.reinitialize_request_cache()
      self.assertTrue(request_cache)
      # Returns same object in a context of a same request thread.
      self.assertTrue(api.get_request_cache() is request_cache)
      return request_cache

    # Launch two threads running 'thread_proc', wait for them to stop, collect
    # whatever they return.
    results_queue = queue.Queue()
    threads = [
        threading.Thread(target=lambda: results_queue.put(thread_proc()))
        for _ in range(2)
    ]
    for t in threads:
      t.start()
    results = [results_queue.get(timeout=1) for _ in range(len(threads))]

    # Different threads use different RequestCache objects.
    self.assertTrue(results[0] is not results[1])

  def test_get_request_cache_different_requests(self):
    """Reinitializing the request cache closes the previous instance."""
    # Grab request cache for 'current' request.
    request_cache = api.reinitialize_request_cache()

    # Track calls to 'close'.
    close_calls = []
    self.mock(request_cache, 'close', lambda: close_calls.append(1))

    # Should return a new instance of request cache now.
    self.assertTrue(api.reinitialize_request_cache() is not request_cache)
    # Old one should have been closed.
    self.assertEqual(1, len(close_calls))

  def test_get_process_auth_db_expiration(self):
    """Process cache is refetched only after the expiration interval."""
    # Prepare several instances of AuthDB to be used in mocks.
    auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
    auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1))

    # Fetch initial copy of AuthDB.
    self.set_time(0)
    self.set_fetched_auth_db(auth_db_v0)
    self.assertEqual(auth_db_v0, api.get_process_auth_db())

    # It doesn't expire for some time.
    self.set_time(api.get_process_cache_expiration_sec() - 1)
    self.set_fetched_auth_db(auth_db_v1)
    self.assertEqual(auth_db_v0, api.get_process_auth_db())

    # But it does expire after the interval passes.
    self.set_time(api.get_process_cache_expiration_sec() + 1)
    self.set_fetched_auth_db(auth_db_v1)
    self.assertEqual(auth_db_v1, api.get_process_auth_db())

  def test_get_process_auth_db_known_version(self):
    """A refetch returning the same revision keeps the cached object."""
    auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
    auth_db_v0_again = new_auth_db(replication_state=mock_replication_state(0))

    self.set_time(0)
    self.set_fetched_auth_db(auth_db_v0)
    self.assertEqual(auth_db_v0, api.get_process_auth_db())

    # Same revision comes back: the original object instance is retained.
    self.set_time(api.get_process_cache_expiration_sec() + 1)
    self.set_fetched_auth_db(auth_db_v0_again)
    self.assertTrue(api.get_process_auth_db() is auth_db_v0)

  def test_get_process_auth_db_multithreading(self):
    """Concurrent callers get the stale copy while one thread refetches."""
    def run_in_thread(func):
      # Returns a queue that will eventually hold func()'s result.
      result = queue.Queue()
      thread = threading.Thread(target=lambda: result.put(func()))
      thread.start()
      return result

    auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
    auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1))

    self.set_time(0)
    self.set_fetched_auth_db(auth_db_v0)
    self.assertEqual(auth_db_v0, api.get_process_auth_db())

    # Make the cache expire so the next caller starts a refetch.
    self.set_time(api.get_process_cache_expiration_sec() + 1)

    # A fetch that blocks until we allow it to finish.
    fetching_now = threading.Event()
    auth_db_queue = queue.Queue()
    def mock_fetch_auth_db(**_kwargs):
      fetching_now.set()
      return auth_db_queue.get()
    self.mock(api, 'fetch_auth_db', mock_fetch_auth_db)

    # While the background thread is stuck fetching, the main thread still
    # gets the stale copy instead of blocking.
    future = run_in_thread(api.get_process_auth_db)
    fetching_now.wait()
    self.assertEqual(auth_db_v0, api.get_process_auth_db())
    auth_db_queue.put(auth_db_v1)
    self.assertEqual(auth_db_v1, future.get())

    # Now main thread should get it as well.
    self.assertEqual(auth_db_v1, api.get_process_auth_db())

  def test_get_process_auth_db_exceptions(self):
    """A failing refetch logs the exception and serves the stale copy."""
    # Prepare several instances of AuthDB to be used in mocks.
    auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
    auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1))

    # Fetch initial copy of AuthDB.
    self.set_time(0)
    self.set_fetched_auth_db(auth_db_v0)
    self.assertEqual(auth_db_v0, api.get_process_auth_db())

    # Make process cache expire.
    self.set_time(api.get_process_cache_expiration_sec() + 1)

    # Emulate an exception in fetch_auth_db. Note: this was '*_kwargs', which
    # rejects the keyword call fetch_auth_db(known_auth_db=...) with a
    # TypeError before the intended error fires; '**_kwargs' matches the other
    # fetch_auth_db mocks in this class and raises the intended exception.
    def mock_fetch_auth_db(**_kwargs):
      raise Exception('Boom!')
    self.mock(api, 'fetch_auth_db', mock_fetch_auth_db)

    # Capture calls to logging.exception.
    logger_calls = []
    self.mock(api.logging, 'exception', lambda *_args: logger_calls.append(1))

    # Should return older copy of auth_db_v0 and log the exception.
    self.assertEqual(auth_db_v0, api.get_process_auth_db())
    self.assertEqual(1, len(logger_calls))

    # Make fetch_auth_db to work again. Verify get_process_auth_db() works too.
    self.set_fetched_auth_db(auth_db_v1)
    self.assertEqual(auth_db_v1, api.get_process_auth_db())

  def test_get_latest_auth_db(self):
    """get_latest_auth_db forces a fetch and refreshes the process cache."""
    auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0))
    auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1))

    # Fetch initial copy of AuthDB.
    self.set_time(0)
    self.set_fetched_auth_db(auth_db_v0)
    self.assertEqual(auth_db_v0, api.get_process_auth_db())

    # Rig up fetch_auth_db to return a newer version.
    self.set_fetched_auth_db(auth_db_v1)

    # 'get_process_auth_db' still returns the cached one.
    self.assertEqual(auth_db_v0, api.get_process_auth_db())

    # But 'get_latest_auth_db' returns a new one and updates the cached copy.
    self.assertEqual(auth_db_v1, api.get_latest_auth_db())
    self.assertEqual(auth_db_v1, api.get_process_auth_db())

  def test_get_request_auth_db(self):
    """The request-local cache pins the AuthDB for the request's lifetime."""
    api.reinitialize_request_cache()

    # 'get_request_auth_db()' returns whatever get_process_auth_db() returns
    # when called for a first time.
    self.mock(api, 'get_process_auth_db', lambda: 'fake')
    self.assertEqual('fake', api.get_request_auth_db())

    # But then it caches it locally and reuses local copy, instead of calling
    # 'get_process_auth_db()' all the time.
    self.mock(api, 'get_process_auth_db', lambda: 'another-fake')
    self.assertEqual('fake', api.get_request_auth_db())

  def test_warmup(self):
    """warmup() populates the process-global AuthDB cache."""
    self.assertFalse(api._auth_db)
    api.warmup()
    self.assertTrue(api._auth_db)
class ApiTest(test_case.TestCase):
  """Tests for get_current_identity and the @require/@public decorators."""

  def setUp(self):
    super(ApiTest, self).setUp()
    api.reset_local_state()

  def test_get_current_identity_unitialized(self):
    # NOTE(review): "unitialized" is a typo for "uninitialized" in the method
    # name; left unchanged to avoid altering which tests get discovered.
    self.assertEqual(api.get_current_identity(), model.Anonymous)

  def test_get_current_identity(self):
    # Identity stored in the request cache is what get_current_identity sees.
    ident = model.Identity.from_bytes('user:abc@example.com')
    api.get_request_cache().current_identity = ident
    self.assertEqual(ident, api.get_current_identity())

  def test_require_decorator_ok(self):
    # Check callback runs exactly once and the wrapped function executes.
    callback_calls = []
    def require_callback():
      callback_calls.append(1)
      return True
    @api.require(require_callback)
    def allowed(*args, **kwargs):
      return (args, kwargs)
    self.assertEqual(((1, 2), {'a': 3}), allowed(1, 2, a=3))
    self.assertEqual(1, len(callback_calls))

  def test_require_decorator_fail(self):
    # The wrapped function body is never entered when the check denies.
    forbidden_calls = []
    @api.require(lambda: False)
    def forbidden():
      forbidden_calls.append(1)
    with self.assertRaises(api.AuthorizationError):
      forbidden()
    self.assertFalse(forbidden_calls)

  def test_require_decorator_error_msg(self):
    # The custom message is propagated into the AuthorizationError.
    @api.require(lambda: False, 'Forbidden!')
    def forbidden():
      pass
    with self.assertRaisesRegexp(api.AuthorizationError, 'Forbidden!'):
      forbidden()

  def test_require_decorator_nesting_ok(self):
    # Nested @require checks run outermost-first.
    calls = []
    def check(name):
      calls.append(name)
      return True
    @api.require(lambda: check('A'))
    @api.require(lambda: check('B'))
    def allowed(arg):
      return arg
    self.assertEqual('value', allowed('value'))
    self.assertEqual(['A', 'B'], calls)

  def test_require_decorator_nesting_first_deny(self):
    # If the outer check denies, the inner check is never evaluated.
    calls = []
    def check(name, result):
      calls.append(name)
      return result
    forbidden_calls = []
    @api.require(lambda: check('A', False))
    @api.require(lambda: check('B', True))
    def forbidden(arg):
      forbidden_calls.append(1)
    with self.assertRaises(api.AuthorizationError):
      forbidden('value')
    self.assertFalse(forbidden_calls)
    self.assertEqual(['A'], calls)

  def test_require_decorator_nesting_non_first_deny(self):
    # Both checks run; a deny in the inner one still skips the body.
    calls = []
    def check(name, result):
      calls.append(name)
      return result
    forbidden_calls = []
    @api.require(lambda: check('A', True))
    @api.require(lambda: check('B', False))
    def forbidden(arg):
      forbidden_calls.append(1)
    with self.assertRaises(api.AuthorizationError):
      forbidden('value')
    self.assertFalse(forbidden_calls)
    self.assertEqual(['A', 'B'], calls)

  def test_require_decorator_on_method(self):
    # @require works on bound instance methods, preserving self.
    calls = []
    def checker():
      calls.append(1)
      return True
    class Class(object):
      @api.require(checker)
      def method(self, *args, **kwargs):
        return (self, args, kwargs)
    obj = Class()
    self.assertEqual((obj, ('value',), {'a': 2}), obj.method('value', a=2))
    self.assertEqual(1, len(calls))

  def test_require_decorator_on_static_method(self):
    # @require composes under @staticmethod.
    calls = []
    def checker():
      calls.append(1)
      return True
    class Class(object):
      @staticmethod
      @api.require(checker)
      def static_method(*args, **kwargs):
        return (args, kwargs)
    self.assertEqual((('value',), {'a': 2}), Class.static_method('value', a=2))
    self.assertEqual(1, len(calls))

  def test_require_decorator_on_class_method(self):
    # @require composes under @classmethod, preserving cls.
    calls = []
    def checker():
      calls.append(1)
      return True
    class Class(object):
      @classmethod
      @api.require(checker)
      def class_method(cls, *args, **kwargs):
        return (cls, args, kwargs)
    self.assertEqual(
        (Class, ('value',), {'a': 2}), Class.class_method('value', a=2))
    self.assertEqual(1, len(calls))

  def test_require_decorator_ndb_nesting_require_first(self):
    # @require composes with ndb decorators in either order.
    calls = []
    def checker():
      calls.append(1)
      return True
    @api.require(checker)
    @ndb.non_transactional
    def func(*args, **kwargs):
      return (args, kwargs)
    self.assertEqual((('value',), {'a': 2}), func('value', a=2))
    self.assertEqual(1, len(calls))

  def test_require_decorator_ndb_nesting_require_last(self):
    calls = []
    def checker():
      calls.append(1)
      return True
    @ndb.non_transactional
    @api.require(checker)
    def func(*args, **kwargs):
      return (args, kwargs)
    self.assertEqual((('value',), {'a': 2}), func('value', a=2))
    self.assertEqual(1, len(calls))

  def test_public_then_require_fails(self):
    # @public and @require are mutually exclusive, in either order.
    with self.assertRaises(TypeError):
      @api.public
      @api.require(lambda: True)
      def func():
        pass

  def test_require_then_public_fails(self):
    with self.assertRaises(TypeError):
      @api.require(lambda: True)
      @api.public
      def func():
        pass

  def test_is_decorated(self):
    # Both @public and @require mark the function as decorated.
    self.assertTrue(api.is_decorated(api.public(lambda: None)))
    self.assertTrue(
        api.is_decorated(api.require(lambda: True)(lambda: None)))

  @mock.patch('logging.info')
  def test_require_log_identity(self, logfunc):
    # With log_identity=True the caller identity is logged on access.
    ident = model.Identity.from_bytes('user:abc@example.com')
    api.get_request_cache().current_identity = ident
    @api.require(lambda: True, log_identity=True)
    def func():
      pass
    func()
    logfunc.assert_called_once_with('Accessed from user:abc@example.com')
class OAuthAccountsTest(test_case.TestCase):
  """Tests for api.extract_oauth_caller_identity."""

  def mock_all(self, user_email, client_id, allowed_client_ids=()):
    """Mocks the current OAuth user, its client_id and allowed client ids."""
    # NOTE: these lambdas become *methods* of the fake classes, so the ignored
    # first argument is the implicit 'self'.
    class FakeUser(object):
      email = lambda _: user_email
    class FakeAuthDB(object):
      is_allowed_oauth_client_id = lambda _, cid: cid in allowed_client_ids
    self.mock(api.oauth, 'get_current_user', lambda _: FakeUser())
    self.mock(api.oauth, 'get_client_id', lambda _: client_id)
    self.mock(api, 'get_request_auth_db', FakeAuthDB)

  @staticmethod
  def user(email):
    # Shorthand for a 'user:' identity.
    return model.Identity(model.IDENTITY_USER, email)

  def test_is_allowed_oauth_client_id_ok(self):
    # Known client id => identity is extracted.
    self.mock_all('email@email.com', 'some-client-id', ['some-client-id'])
    self.assertEqual(
        (self.user('email@email.com'), api.new_auth_details()),
        api.extract_oauth_caller_identity())

  def test_is_allowed_oauth_client_id_not_ok(self):
    # Unknown client id => rejected.
    self.mock_all('email@email.com', 'some-client-id', ['another-client-id'])
    with self.assertRaises(api.AuthorizationError):
      api.extract_oauth_caller_identity()

  def test_is_allowed_oauth_client_id_not_ok_empty(self):
    # No allowed client ids at all => rejected.
    self.mock_all('email@email.com', 'some-client-id')
    with self.assertRaises(api.AuthorizationError):
      api.extract_oauth_caller_identity()
class AuthWebUIConfigTest(test_case.TestCase):
  """Tests for the cached web client id getters/setter in api."""

  def test_works(self):
    # Order matters: starts empty, then the stored value is visible through
    # both the uncached and the cached getters.
    utils.clear_cache(api.get_web_client_id)
    self.assertEqual('', api.get_web_client_id_uncached())
    api.set_web_client_id('zzz')
    self.assertEqual('zzz', api.get_web_client_id_uncached())
    self.assertEqual('zzz', api.get_web_client_id())
class AuthDBBuilder(object):
  """Fluent builder producing an AuthDB populated with groups."""

  def __init__(self):
    self.groups = []

  def group(self, name, members=None, globs=None, nested=None, owners=None):
    """Appends an AuthGroup entity; returns self for chaining."""
    entity = model.AuthGroup(
        key=model.group_key(name),
        members=[model.Identity.from_bytes(m) for m in (members or [])],
        globs=[model.IdentityGlob.from_bytes(g) for g in (globs or [])],
        nested=nested or [],
        owners=owners or 'default-owners-group',
    )
    self.groups.append(entity)
    return self

  def build(self):
    """Constructs an AuthDB out of the accumulated groups."""
    return new_auth_db(groups=self.groups)
class RelevantSubgraphTest(test_case.TestCase):
  """Tests for AuthDB.get_relevant_subgraph."""

  def call(self, db, principal):
    """Returns the subgraph as {index: (node, {edge_label: [node_indexes]})}.

    |principal| may be an identity glob string (contains '*'), an identity
    string (contains '@') or a group name.
    """
    if '*' in principal:
      principal = model.IdentityGlob.from_bytes(principal)
    elif '@' in principal:
      principal = model.Identity.from_bytes(principal)
    graph = db.get_relevant_subgraph(principal)
    # Use a dict with integer keys instead of a list to improve the readability
    # of assertions below.
    nodes = {}
    for i, (node, edges) in enumerate(graph.describe()):
      if isinstance(node, (model.Identity, model.IdentityGlob)):
        node = node.to_bytes()
      nodes[i] = (node, {l: sorted(s) for l, s in edges.items() if s})
    return nodes

  def test_empty(self):
    # With no groups at all, the subgraph is just the principal itself.
    db = AuthDBBuilder().build()
    self.assertEqual(
        {0: ('user:a@example.com', {})}, self.call(db, 'user:a@example.com'))
    self.assertEqual(
        {0: ('user:*@example.com', {})}, self.call(db, 'user:*@example.com'))
    self.assertEqual(
        {0: ('group', {})}, self.call(db, 'group'))

  def test_identity_discoverable_directly_and_through_glob(self):
    b = AuthDBBuilder()
    b.group('g1', ['user:a@example.com'])
    b.group('g2', ['user:b@example.com'])
    b.group('g3', [], ['user:*@example.com'])
    b.group('g4', ['user:a@example.com'], ['user:*'])
    self.assertEqual({
      0: ('user:a@example.com', {'IN': [1, 3, 4, 5]}),
      1: ('user:*@example.com', {'IN': [2]}),
      2: ('g3', {}),
      3: ('user:*', {'IN': [4]}),
      4: ('g4', {}),
      5: ('g1', {}),
    }, self.call(b.build(), 'user:a@example.com'))

  def test_glob_is_matched_directly(self):
    b = AuthDBBuilder()
    b.group('g1', [], ['user:*@example.com'])
    b.group('g2', [], ['user:*'])
    self.assertEqual({
      0: ('user:*@example.com', {'IN': [1]}),
      1: ('g1', {}),
    }, self.call(b.build(), 'user:*@example.com'))

  def test_simple_group_lookup(self):
    b = AuthDBBuilder()
    b.group('g1', nested=['g2', 'g3'])
    b.group('g2', nested=['g3'])
    b.group('g3')
    self.assertEqual({
      0: ('g3', {'IN': [1, 2]}),
      1: ('g1', {}),
      2: ('g2', {'IN': [1]}),
    }, self.call(b.build(), 'g3'))

  def test_ownership_relations(self):
    # OWNS edges are followed in addition to IN edges.
    b = AuthDBBuilder()
    b.group('a-root', nested=['b-inner'])
    b.group('b-inner')
    b.group('c-owned-by-root', owners='a-root')
    b.group('d-includes-owned-by-root', nested=['c-owned-by-root'])
    b.group('e-owned-by-3', owners='d-includes-owned-by-root')
    self.assertEqual({
      0: ('b-inner', {'IN': [1]}),
      1: ('a-root', {'OWNS': [2]}),
      2: ('c-owned-by-root', {'IN': [3]}),
      3: ('d-includes-owned-by-root', {'OWNS': [4]}),
      4: ('e-owned-by-3', {}),
    }, self.call(b.build(), 'b-inner'))

  def test_diamond(self):
    # A node reachable via two paths appears once.
    b = AuthDBBuilder()
    b.group('top', nested=['middle1', 'middle2'])
    b.group('middle1', nested=['bottom'])
    b.group('middle2', nested=['bottom'])
    b.group('bottom')
    self.assertEqual({
      0: ('bottom', {'IN': [1, 3]}),
      1: ('middle1', {'IN': [2]}),
      2: ('top', {}),
      3: ('middle2', {'IN': [2]}),
    }, self.call(b.build(), 'bottom'))

  def test_cycle(self):
    # Note: cycles in groups are forbidden on API layer, but make sure we still
    # handle them without hanging in case something unexpected happens and they
    # appear.
    b = AuthDBBuilder()
    b.group('g1', nested=['g2'])
    b.group('g2', nested=['g1', 'g2'])
    self.assertEqual({
      0: ('g2', {'IN': [0, 1]}),
      1: ('g1', {'IN': [0]}),
    }, self.call(b.build(), 'g2'))

  def test_selfowners(self):
    # A group owning itself yields a self-referencing OWNS edge.
    b = AuthDBBuilder()
    b.group('g1', nested=['g2'], owners='g1')
    b.group('g2')
    self.assertEqual({0: ('g1', {'OWNS': [0]})}, self.call(b.build(), 'g1'))
    self.assertEqual({
      0: ('g2', {'IN': [1]}),
      1: ('g1', {'OWNS': [1]}),
    }, self.call(b.build(), 'g2'))

  def test_messy_graph(self):
    b = AuthDBBuilder()
    b.group('directly', ['user:a@example.com'])
    b.group('via-glob', [], ['user:*@example.com'])
    b.group('g1', nested=['via-glob'], owners='g2')
    b.group('g2', nested=['directly'])
    b.group('g3', nested=['g1'])
    self.assertEqual({
      0: ('user:a@example.com', {'IN': [1, 5]}),
      1: ('user:*@example.com', {'IN': [2]}),
      2: ('via-glob', {'IN': [3]}),
      3: ('g1', {'IN': [4]}),
      4: ('g3', {}),
      5: ('directly', {'IN': [6]}),
      6: ('g2', {'OWNS': [3]}),
    }, self.call(b.build(), 'user:a@example.com'))
class PermissionsTest(test_case.TestCase):
  """Tests for api.Permission interning and name validation."""

  def test_happy_path(self):
    """Equal names intern to one instance; str/repr are stable."""
    first = api.Permission('service.subject.verb')
    second = api.Permission('service.subject.verb')
    other = api.Permission('service.subject.another')
    self.assertEqual(first, second)
    self.assertTrue(first is second)
    self.assertNotEqual(first, other)
    self.assertEqual('service.subject.verb', str(first))
    self.assertEqual("'service.subject.verb'", '%r' % (first,))

  def test_validation_errors(self):
    """Non-str values and malformed '<service>.<subject>.<verb>' names fail."""
    bad_inputs = [
        (TypeError, 123),
        (TypeError, u'no.unicode.here'),
        (ValueError, 'too.few'),
        (ValueError, 'too.too.too.much'),
        (ValueError, 'has..empty'),
    ]
    for exc, value in bad_inputs:
      with self.assertRaises(exc):
        api.Permission(value)
class RealmStringsTest(test_case.TestCase):
  """Tests for realm-name helpers: root_realm, legacy_realm, validation."""

  def test_happy_path(self):
    """Both str and unicode project names are accepted."""
    self.assertEqual(api.root_realm('proj'), 'proj:@root')
    self.assertEqual(api.root_realm(u'proj'), 'proj:@root')
    self.assertEqual(api.legacy_realm('proj'), 'proj:@legacy')
    self.assertEqual(api.legacy_realm(u'proj'), 'proj:@legacy')

  def test_validation_errors(self):
    """None and empty project names are rejected."""
    for helper in (api.root_realm, api.legacy_realm):
      with self.assertRaises(TypeError):
        helper(None)
      with self.assertRaises(ValueError):
        helper('')

  def test_validate_realm_name(self):
    """Well-formed project and @internal realm names validate cleanly."""
    valid_names = [
        'proj:realm',
        'proj:@root',
        'proj:@legacy',
        '@internal:realm',
        '@internal:@root',
        '@internal:@legacy',
    ]
    for name in valid_names:
      self.assertIsNone(api.validate_realm_name(name))

  def test_validate_realm_name_errors(self):
    """Malformed realm names raise ValueError."""
    invalid_names = ['realm', 'proj:@invalid', 'proj:re:alm', '@proj:realm']
    for name in invalid_names:
      with self.assertRaises(ValueError):
        api.validate_realm_name(name)
# Permissions used throughout RealmsTest; they are injected into the mocked
# api._all_perms registry in RealmsTest.setUp.
PERM0 = api.Permission('luci.dev.testing0')
PERM1 = api.Permission('luci.dev.testing1')
PERM2 = api.Permission('luci.dev.testing2')
ALL_PERMS = [PERM0, PERM1, PERM2]
# Identities used as binding principals in RealmsTest.
ID1 = model.Identity.from_bytes('user:1@example.com')
ID2 = model.Identity.from_bytes('user:2@example.com')
ID3 = model.Identity.from_bytes('user:3@example.com')
ADMIN = model.Identity.from_bytes('user:admin@example.com')
class RealmsTest(test_case.TestCase):
  """Tests for AuthDB.has_permission, has_permission_dryrun and realm data."""

  @staticmethod
  def auth_db(realms_map, groups=None, api_version=None):
    """Builds an AuthDB from {realm_name: {perms_tuple: [principals]}}.

    |groups| is {group_name: [identities]}. Principals may be group references
    (strings like 'group:g') or Identity objects.
    """
    return api.AuthDB.from_proto(
        replication_state=model.AuthReplicationState(),
        auth_db=replication_pb2.AuthDB(
            groups=[
              {
                'name': name,
                'members': [m.to_bytes() for m in members],
                'created_by': 'user:zzz@example.com',
                'modified_by': 'user:zzz@example.com',
              } for name, members in (groups or {}).items()
            ],
            realms={
              'api_version': api_version or realms.API_VERSION,
              'permissions': [
                {'name': p.name} for p in ALL_PERMS
              ],
              'realms': [
                {
                  'name': name,
                  'bindings': [
                    {
                      # Permissions are referenced by index in the
                      # realms-level 'permissions' list.
                      'permissions': [
                        ALL_PERMS.index(p)
                        for p in perms
                      ],
                      'principals': [
                        p if isinstance(p, str) else p.to_bytes()
                        for p in principals
                      ],
                    } for perms, principals in sorted(bindings.items())
                  ],
                  'data': {
                    'enforce_in_service': ['data for %s' % name],
                  },
                } for name, bindings in sorted(realms_map.items())
              ],
            },
        ),
        additional_client_ids=[])

  def setUp(self):
    super(RealmsTest, self).setUp()
    # Register the test permissions and capture all api.logging output per
    # level so tests can assert on emitted messages.
    self.all_perms = {p.name: p for p in ALL_PERMS}
    self.mock(api, '_all_perms', self.all_perms)
    self.logs = {}
    for lvl in ('info', 'warning', 'error', 'exception'):
      self.logs[lvl] = []
      def appender(lvl):  # need to capture lvl in a separate closure
        return lambda msg, *args: self.logs[lvl].append(msg % args)
      self.mock(api.logging, lvl, appender(lvl))

  def assert_logs_empty(self, lvl):
    """Asserts nothing was logged at level |lvl|."""
    self.assertEqual([], self.logs[lvl])

  def assert_logs(self, lvl, msg):
    """Asserts some message logged at |lvl| contains substring |msg|."""
    self.assertTrue(
        any(msg in m for m in self.logs[lvl]),
        '%r not in %r' % (msg, self.logs[lvl]))

  def assert_check(self, db, perm, realms, ident, outcome):
    """Asserts db.has_permission(...) returns |outcome|."""
    self.assertEqual(
        outcome, db.has_permission(perm, realms, ident),
        'has_permission(%r, %r, %r) is %s, but should be %s' %
        (perm, realms, ident.to_bytes(), not outcome, outcome))

  def test_direct_inclusion_in_binding(self):
    db = self.auth_db({
      'proj:@root': {},
      'proj:realm': {
        (PERM0, PERM1): [ID1],
        (PERM0, PERM2): [ID2],
      },
      'proj:another/realm': {
        (PERM2,): [ID1, ID3],
      },
    })
    self.assert_check(db, PERM0, ['proj:realm'], ID1, True)
    self.assert_check(db, PERM1, ['proj:realm'], ID1, True)
    self.assert_check(db, PERM2, ['proj:realm'], ID1, False)
    self.assert_check(db, PERM0, ['proj:realm'], ID2, True)
    self.assert_check(db, PERM1, ['proj:realm'], ID2, False)
    self.assert_check(db, PERM2, ['proj:realm'], ID2, True)
    # A permission granted in any of the listed realms is enough.
    self.assert_check(
        db, PERM2, ['proj:realm', 'proj:another/realm'], ID1, True)
    self.assert_check(
        db, PERM2, ['proj:realm', 'proj:another/realm'], ID3, True)

  def test_inclusion_through_group(self):
    db = self.auth_db({
      'proj:@root': {},
      'proj:realm': {
        (PERM0, PERM1): ['group:empty', 'group:g1'],
        (PERM0, PERM2): ['group:empty', 'group:g2'],
      },
    }, groups={'empty': [], 'g1': [ID1], 'g2': [ID2]})
    self.assert_check(db, PERM0, ['proj:realm'], ID1, True)
    self.assert_check(db, PERM1, ['proj:realm'], ID1, True)
    self.assert_check(db, PERM2, ['proj:realm'], ID1, False)
    self.assert_check(db, PERM0, ['proj:realm'], ID2, True)
    self.assert_check(db, PERM1, ['proj:realm'], ID2, False)
    self.assert_check(db, PERM2, ['proj:realm'], ID2, True)

  def test_fallback_to_root(self):
    # A missing realm falls back to the project's @root realm with a warning.
    db = self.auth_db({'proj:@root': {(PERM0,): [ID1]}})
    self.assert_check(db, PERM0, ['proj:@root'], ID1, True)
    self.assert_check(db, PERM0, ['proj:@root'], ID2, False)
    self.assert_logs_empty('warning')
    self.assert_check(db, PERM0, ['proj:realm'], ID1, True)
    self.assert_logs('warning', 'falling back to the root')
    self.assert_check(db, PERM0, ['proj:realm'], ID2, False)
    self.assert_check(db, PERM0, ['proj:another/realm'], ID1, True)

  def test_missing_project(self):
    # Checks against an entirely absent project deny with a warning.
    db = self.auth_db({})
    self.assert_check(db, PERM0, ['proj:@root'], ID1, False)
    self.assert_logs('warning', 'a non-existing root realm')
    self.logs['warning'] = []
    self.assert_check(db, PERM0, ['proj:@legacy'], ID1, False)
    self.assert_logs('warning', 'doesn\'t have a root realm')
    self.logs['warning'] = []
    self.assert_check(db, PERM0, ['proj:another/realm'], ID1, False)
    self.assert_logs('warning', 'doesn\'t have a root realm')
    self.logs['warning'] = []

  def test_unknown_permission(self):
    # A permission registered locally but absent from the AuthDB denies.
    unknown = api.Permission('luci.dev.unknown')
    self.all_perms[unknown.name] = unknown
    db = self.auth_db({'proj:realm': {(PERM0,): [ID1]}})
    self.assert_logs('warning', 'is not in the AuthDB')
    self.assert_check(db, unknown, ['proj:realm'], ID1, False)
    self.assert_logs('warning', 'not present in the AuthDB')

  def test_realms_unavailable(self):
    # AuthDB built without realms raises RealmsError on permission checks.
    empty = new_auth_db()
    with self.assertRaises(api.RealmsError):
      empty.has_permission('luci.dev.p1', ['proj:realm'], ID1)

  def test_bad_api_version(self):
    with self.assertRaises(api.RealmsError):
      self.auth_db({}, api_version=666)

  def test_bad_permission_type(self):
    # Permissions must be api.Permission instances, not bare strings.
    db = self.auth_db({})
    with self.assertRaises(TypeError):
      db.has_permission('luci.dev.p1', ['proj:realm'], ID1)

  def test_bad_realm_names(self):
    db = self.auth_db({})
    for r in ['z', ':z', 'p:', 'blah blah:z', 'p:BLAH', 'p:@z', 'p:p:z']:
      with self.assertRaises(ValueError):
        db.has_permission(PERM0, [r], ID1)

  def test_has_permission_dryrun(self):
    # Exercises all dry-run outcomes: match (ALLOW/DENY), mismatch,
    # admin-group match, and an exception inside the real check.
    rc = api.RequestCache()
    rc._auth_db = self.auth_db(
        {'proj:@root': {(PERM0,): [ID1]}}, groups={'admin': [ADMIN]})
    self.mock(api, 'get_request_cache', lambda: rc)

    # Match.
    self.logs['info'] = []
    api.has_permission_dryrun(PERM0, ['proj:@root'], True, ID1, 'admin', 'bug')
    self.assert_logs('info',
        "bug: has_permission_dryrun('luci.dev.testing0', ['proj:@root'], "
        "'user:1@example.com'), authdb=0: match - ALLOW")
    self.logs['info'] = []
    api.has_permission_dryrun(PERM1, ['proj:@root'], False, ID1, 'admin', 'bug')
    self.assert_logs('info',
        "bug: has_permission_dryrun('luci.dev.testing1', ['proj:@root'], "
        "'user:1@example.com'), authdb=0: match - DENY")

    # Mismatch.
    self.logs['warning'] = []
    api.has_permission_dryrun(PERM0, ['proj:@root'], False, ID1, 'admin', 'bug')
    self.assert_logs('warning',
        "bug: has_permission_dryrun('luci.dev.testing0', ['proj:@root'], "
        "'user:1@example.com'), authdb=0: mismatch - got ALLOW, want DENY")
    self.logs['warning'] = []
    api.has_permission_dryrun(PERM1, ['proj:@root'], True, ID1, 'admin', 'bug')
    self.assert_logs('warning',
        "bug: has_permission_dryrun('luci.dev.testing1', ['proj:@root'], "
        "'user:1@example.com'), authdb=0: mismatch - got DENY, want ALLOW")

    # Admin match.
    self.logs['info'] = []
    api.has_permission_dryrun(
        PERM0, ['proj:@root'], True, ADMIN, 'admin', 'bug')
    self.assert_logs('info',
        "bug: has_permission_dryrun('luci.dev.testing0', ['proj:@root'], "
        "'user:admin@example.com'), authdb=0: match - ADMIN_ALLOW")

    # Blow up.
    self.logs['exception'] = []
    api.has_permission_dryrun(PERM1, ['@root'], True, ID1, 'admin', 'bug')
    self.assert_logs('exception',
        "bug: has_permission_dryrun('luci.dev.testing1', ['@root'], "
        "'user:1@example.com'), authdb=0: exception ValueError, want ALLOW")

  def test_realm_data(self):
    # Realm data falls back to @root for missing realms; missing projects
    # yield None.
    db = self.auth_db({'proj:@root': {}, 'proj:r': {}})
    def realm_data(realm):
      r = db.get_realm_data(realm)
      return r.enforce_in_service[0] if r else None
    self.assertEqual('data for proj:r', realm_data('proj:r'))
    self.assertEqual('data for proj:@root', realm_data('proj:@root'))
    self.assertEqual('data for proj:@root', realm_data('proj:zzz'))
    self.assertEqual(None, realm_data('zzz:@root'))
if __name__ == '__main__':
  if '-v' in sys.argv:
    # Show full (untruncated) diffs in assertion failures when verbose.
    unittest.TestCase.maxDiff = None
  unittest.main()
| true | true |
f7285ecfd2ae9a77128a046f84d7aa4562c2c9af | 396 | py | Python | purnkleen/wsgi.py | RommelTJ/purnkleen | 7a2c94fa0c2331cdc2f72e4d6718068bf00357c4 | [
"MIT"
] | 1 | 2017-12-22T04:48:22.000Z | 2017-12-22T04:48:22.000Z | purnkleen/wsgi.py | RommelTJ/purnkleen | 7a2c94fa0c2331cdc2f72e4d6718068bf00357c4 | [
"MIT"
] | 27 | 2018-03-05T16:21:52.000Z | 2021-03-09T04:41:16.000Z | purnkleen/wsgi.py | RommelTJ/purnkleen | 7a2c94fa0c2331cdc2f72e4d6718068bf00357c4 | [
"MIT"
] | null | null | null | """
WSGI config for purnkleen project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "purnkleen.settings")
application = get_wsgi_application()
| 23.294118 | 78 | 0.787879 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "purnkleen.settings")
application = get_wsgi_application()
| true | true |
f728607832ca55dae9a6e03f02e7c6d5ba6ef455 | 1,098 | py | Python | 1-python/python/data-types.py | Domin-Imperial/Domin-Respository | 2e531aabc113ed3511f349107695847b5c4e4320 | [
"MIT"
] | null | null | null | 1-python/python/data-types.py | Domin-Imperial/Domin-Respository | 2e531aabc113ed3511f349107695847b5c4e4320 | [
"MIT"
] | null | null | null | 1-python/python/data-types.py | Domin-Imperial/Domin-Respository | 2e531aabc113ed3511f349107695847b5c4e4320 | [
"MIT"
] | 1 | 2021-05-24T20:09:38.000Z | 2021-05-24T20:09:38.000Z | # python has several data types
# numeric data types -
# int, float
# int for whole numbers
x = 10 + 2
# python is dynamically typed, so we can assign whatever value we want to
# any variable... but type annotations are a good way to keep organized.
# float for fractional numbers or very large numbers
y: float = 10.5
# we have typical arithmetic operators
# + - * / %
# ** for exponent
# // for integer division (regular / will return fractional floats)
print(5 // 2)
print(5 / 2)
# typical comparison operators
# < <= != == >= >
# bool data type, can be True or False.
# typical boolean operators, spelled out as words
print(3 < 4 or 5 < 4)
# str data type (string)
# use single quotes or double quotes, no difference
data = "asdf"
data = 'asdf'
# index strings to get individual characters, which are still type str
data = data[0] # == 'a'
print(data)
longstring = '''
multiline
string
'''
# use "in" and "not in" operators to check for membership in any sequence
# (a str is a sequence)
print('abc' in 'abcdef') # True
# list data type
data2 = [ 3, 2, 6, 5 ]
print(data2[1]) # 2
| 21.529412 | 74 | 0.680328 |
x = 10 + 2
y: float = 10.5
print(5 // 2)
print(5 / 2)
print(3 < 4 or 5 < 4)
data = "asdf"
data = 'asdf'
data = data[0]
print(data)
longstring = '''
multiline
string
'''
print('abc' in 'abcdef')
data2 = [ 3, 2, 6, 5 ]
print(data2[1])
| true | true |
f72860c96ea3e4d3445af521dd59c24b214a53e7 | 2,032 | py | Python | HouseMarketTracker/parser/HouseHomePageParser.py | SuperFireFoxy/HouseMarketTracker | b173bec5ff1c9b056231f5bae32ff59424f4bae3 | [
"MIT"
] | null | null | null | HouseMarketTracker/parser/HouseHomePageParser.py | SuperFireFoxy/HouseMarketTracker | b173bec5ff1c9b056231f5bae32ff59424f4bae3 | [
"MIT"
] | 6 | 2018-05-15T09:36:17.000Z | 2018-05-20T07:07:40.000Z | HouseMarketTracker/parser/HouseHomePageParser.py | SuperFireFoxy/HouseMarketTracker | b173bec5ff1c9b056231f5bae32ff59424f4bae3 | [
"MIT"
] | null | null | null | import re
from HouseMarketTracker.parser.ImagesParser import ImagesParser
from HouseMarketTracker.parser.ParseUtil import ParseUtil
class HouseHomePageParser():
def parse(self, response):
meta = response.meta
item = meta['item']
item['house_layout'] = self.parse_layout(response)
images_url = meta['root_url'] + 'xiangce/'
yield from ParseUtil.start_request(images_url, ImagesParser().parse, meta)
def parse_layout(self, response):
layout_list = []
layout_ul_s = response.xpath('//ul[@class="clear house-det"]')
for layout_ul in layout_ul_s:
layout_dict = {}
img_src = layout_ul.xpath('child::*/img/@src').extract_first()
img_src = re.sub(r'\d+?x\d*', '1000x', img_src)
layout_dict['img_src'] = img_src
layout_dict['layout_type_name'] = layout_ul.xpath('child::*/span/text()').extract_first()
info_li = layout_ul.xpath('li[@class="info-li"]')
p1 = info_li.xpath('p[@class="p1"]')
layout_dict['layout_type'] = p1.xpath('text()').extract_first()
layout_dict['construction_area'] = p1.xpath('span[not(@class)]/text()').extract_first()
layout_dict['sales_status'] = p1.xpath('span[contains(@class,"p1-state")]/text()').extract_first()
p2 = info_li.xpath('p[@class="p2"]')
layout_dict['layout_price'] = re.sub(r'\n.+', '', info_li.xpath('string(p[@class="p2"])').extract_first())
layout_dict['last_update_time'] = p2.xpath('span[contains(@class,"p2-time")]/text()').extract_first()
p3 = info_li.xpath('p[@class="p3"]')
key = p3.xpath('text()').extract_first()
value = p3.xpath('span/text()').extract_first()
layout_dict[key] = value
layout_dict['tags'] = info_li.xpath('p[@class="p4"]/span/text()').extract()
layout_list.append(layout_dict)
return layout_list
def parse_unit_info(self, response):
return
| 40.64 | 118 | 0.609252 | import re
from HouseMarketTracker.parser.ImagesParser import ImagesParser
from HouseMarketTracker.parser.ParseUtil import ParseUtil
class HouseHomePageParser():
def parse(self, response):
meta = response.meta
item = meta['item']
item['house_layout'] = self.parse_layout(response)
images_url = meta['root_url'] + 'xiangce/'
yield from ParseUtil.start_request(images_url, ImagesParser().parse, meta)
def parse_layout(self, response):
layout_list = []
layout_ul_s = response.xpath('//ul[@class="clear house-det"]')
for layout_ul in layout_ul_s:
layout_dict = {}
img_src = layout_ul.xpath('child::*/img/@src').extract_first()
img_src = re.sub(r'\d+?x\d*', '1000x', img_src)
layout_dict['img_src'] = img_src
layout_dict['layout_type_name'] = layout_ul.xpath('child::*/span/text()').extract_first()
info_li = layout_ul.xpath('li[@class="info-li"]')
p1 = info_li.xpath('p[@class="p1"]')
layout_dict['layout_type'] = p1.xpath('text()').extract_first()
layout_dict['construction_area'] = p1.xpath('span[not(@class)]/text()').extract_first()
layout_dict['sales_status'] = p1.xpath('span[contains(@class,"p1-state")]/text()').extract_first()
p2 = info_li.xpath('p[@class="p2"]')
layout_dict['layout_price'] = re.sub(r'\n.+', '', info_li.xpath('string(p[@class="p2"])').extract_first())
layout_dict['last_update_time'] = p2.xpath('span[contains(@class,"p2-time")]/text()').extract_first()
p3 = info_li.xpath('p[@class="p3"]')
key = p3.xpath('text()').extract_first()
value = p3.xpath('span/text()').extract_first()
layout_dict[key] = value
layout_dict['tags'] = info_li.xpath('p[@class="p4"]/span/text()').extract()
layout_list.append(layout_dict)
return layout_list
def parse_unit_info(self, response):
return
| true | true |
f728622e2085bc58fb4e845221f472ee9780896d | 6,229 | py | Python | pybind/slxos/v17r_2_00/system_config/interface/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/system_config/interface/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | null | null | null | pybind/slxos/v17r_2_00/system_config/interface/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | [
"Apache-2.0"
] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class interface(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-system-watermark - based on the path /system-config/interface. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__utilization_watermark',)
_yang_name = 'interface'
_rest_name = 'interface'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__utilization_watermark = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="utilization-watermark", rest_name="utilization-watermark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Enable Port utilization watermark (Default: Enabled)', u'cli-show-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-watermark', defining_module='brocade-system-watermark', yang_type='empty', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'system-config', u'interface']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'system', u'interface']
def _get_utilization_watermark(self):
"""
Getter method for utilization_watermark, mapped from YANG variable /system_config/interface/utilization_watermark (empty)
"""
return self.__utilization_watermark
def _set_utilization_watermark(self, v, load=False):
"""
Setter method for utilization_watermark, mapped from YANG variable /system_config/interface/utilization_watermark (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_utilization_watermark is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_utilization_watermark() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="utilization-watermark", rest_name="utilization-watermark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Enable Port utilization watermark (Default: Enabled)', u'cli-show-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-watermark', defining_module='brocade-system-watermark', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """utilization_watermark must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="utilization-watermark", rest_name="utilization-watermark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Enable Port utilization watermark (Default: Enabled)', u'cli-show-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-watermark', defining_module='brocade-system-watermark', yang_type='empty', is_config=True)""",
})
self.__utilization_watermark = t
if hasattr(self, '_set'):
self._set()
def _unset_utilization_watermark(self):
self.__utilization_watermark = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="utilization-watermark", rest_name="utilization-watermark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Enable Port utilization watermark (Default: Enabled)', u'cli-show-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-watermark', defining_module='brocade-system-watermark', yang_type='empty', is_config=True)
utilization_watermark = __builtin__.property(_get_utilization_watermark, _set_utilization_watermark)
_pyangbind_elements = {'utilization_watermark': utilization_watermark, }
| 50.642276 | 525 | 0.728046 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class interface(PybindBase):
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__utilization_watermark',)
_yang_name = 'interface'
_rest_name = 'interface'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__utilization_watermark = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="utilization-watermark", rest_name="utilization-watermark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Enable Port utilization watermark (Default: Enabled)', u'cli-show-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-watermark', defining_module='brocade-system-watermark', yang_type='empty', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'system-config', u'interface']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'system', u'interface']
def _get_utilization_watermark(self):
return self.__utilization_watermark
def _set_utilization_watermark(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="utilization-watermark", rest_name="utilization-watermark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Enable Port utilization watermark (Default: Enabled)', u'cli-show-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-watermark', defining_module='brocade-system-watermark', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """utilization_watermark must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="utilization-watermark", rest_name="utilization-watermark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Enable Port utilization watermark (Default: Enabled)', u'cli-show-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-watermark', defining_module='brocade-system-watermark', yang_type='empty', is_config=True)""",
})
self.__utilization_watermark = t
if hasattr(self, '_set'):
self._set()
def _unset_utilization_watermark(self):
self.__utilization_watermark = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="utilization-watermark", rest_name="utilization-watermark", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Enable Port utilization watermark (Default: Enabled)', u'cli-show-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-watermark', defining_module='brocade-system-watermark', yang_type='empty', is_config=True)
utilization_watermark = __builtin__.property(_get_utilization_watermark, _set_utilization_watermark)
_pyangbind_elements = {'utilization_watermark': utilization_watermark, }
| true | true |
f728637d14058244193eaac66b95488429463bb6 | 1,266 | py | Python | app/api/v1/models/user_models.py | Deekerubo/Store-Manager-API | 02ed2ce0c82d6854cc531a33a85bf9b8dff007fa | [
"MIT"
] | null | null | null | app/api/v1/models/user_models.py | Deekerubo/Store-Manager-API | 02ed2ce0c82d6854cc531a33a85bf9b8dff007fa | [
"MIT"
] | 2 | 2018-12-10T12:03:16.000Z | 2019-10-21T16:53:02.000Z | app/api/v1/models/user_models.py | Deekerubo/Store-Manager-API | 02ed2ce0c82d6854cc531a33a85bf9b8dff007fa | [
"MIT"
] | null | null | null |
from flask_restful import Resource, reqparse
from flask import Flask,request, make_response
from passlib.hash import pbkdf2_sha256 as sha256
users_list = []
class User():
def __init__(self, email, password):
self.user_id = len(users_list)+1
self.email = email
self.password = password
def save_user(self):
""" save a new user """
user = dict(user_id=self.user_id,
email=self.email,
password=self.password)
users_list.append(user)
return user
@classmethod
def fetch_single_user(cls, email):
""" Method to get a user"""
for user in users_list:
if user['email'] == email:
return user
return f"User of ID {email} doesn't exist"
@staticmethod
def generate_hash(password):
return sha256.hash(password)
@staticmethod
def verify_hash(password,email):
user = next((item for item in users_list if item["email"] == email), False)
if user == False:
return False
return sha256.verify(password, user['password'] )
@staticmethod
def find_by_email(email):
return next((item for item in users_list if item["email"] == email), False)
| 28.133333 | 84 | 0.611374 |
from flask_restful import Resource, reqparse
from flask import Flask,request, make_response
from passlib.hash import pbkdf2_sha256 as sha256
users_list = []
class User():
def __init__(self, email, password):
self.user_id = len(users_list)+1
self.email = email
self.password = password
def save_user(self):
user = dict(user_id=self.user_id,
email=self.email,
password=self.password)
users_list.append(user)
return user
@classmethod
def fetch_single_user(cls, email):
for user in users_list:
if user['email'] == email:
return user
return f"User of ID {email} doesn't exist"
@staticmethod
def generate_hash(password):
return sha256.hash(password)
@staticmethod
def verify_hash(password,email):
user = next((item for item in users_list if item["email"] == email), False)
if user == False:
return False
return sha256.verify(password, user['password'] )
@staticmethod
def find_by_email(email):
return next((item for item in users_list if item["email"] == email), False)
| true | true |
f72863c4b8fbd54a1962f10f47b2584a72ee40bc | 11,725 | py | Python | services/PacketSniffer/PacketSniffer.py | ctgriffiths/twister | b3930549551b0104738d56f402eb9b4b90dd692c | [
"Apache-2.0"
] | 19 | 2015-01-29T11:02:42.000Z | 2021-06-03T11:45:42.000Z | services/PacketSniffer/PacketSniffer.py | ctgriffiths/twister | b3930549551b0104738d56f402eb9b4b90dd692c | [
"Apache-2.0"
] | 47 | 2015-01-02T11:39:39.000Z | 2022-02-05T11:29:07.000Z | services/PacketSniffer/PacketSniffer.py | ctgriffiths/twister | b3930549551b0104738d56f402eb9b4b90dd692c | [
"Apache-2.0"
] | 10 | 2015-01-12T07:24:39.000Z | 2017-11-05T00:17:30.000Z | #!/usr/bin/env python
# version: 3.001
#
# -*- coding: utf-8 -*-
#
# File: PacketSniffer.py ; This file is part of Twister.
#
# Copyright (C) 2012-2013 , Luxoft
#
# Authors:
# Adrian Toader <adtoader@luxoft.com>
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from binascii import b2a_base64
from rpyc import Service as rpycService
from rpyc import connect as rpycConnect
from rpyc.utils.helpers import BgServingThread as rpycBgServingThread
from uuid import uuid4
from time import sleep
from copy import deepcopy
from thread import allocate_lock
from scapy.all import Automaton, ATMT, TCP, bind_layers, Packet, NoPayload, Raw
#from PacketSnifferClasses import OpenFlow, CentralEngineObject
from sys import maxsize
from socket import gethostname, gethostbyname, socket, AF_INET, SOCK_DGRAM, inet_ntoa
from fcntl import ioctl
from struct import unpack, pack
from array import array
def all_interfaces():
""" """
is_64bits = maxsize > 2**32
struct_size = 40 if is_64bits else 32
sock = socket(AF_INET, SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array('B', '\0' * bytes)
outbytes = unpack('iL', ioctl(
sock.fileno(),
0x8912, # SIOCGIFCONF
pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split('\0', 1)[0],
inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
class Sniffer(Automaton):
"""
Packet Sniffer Scapy Automaton
"""
def parse_args(self, user, epConfig, OFPort=None, iface=None):
Automaton.parse_args(self)
self.has_iface = None
if iface:
ifaces = all_interfaces()
for _iface in ifaces:
if iface in _iface:
self.has_iface = True
self.socket_kargs = {'iface': iface, }
if not self.has_iface:
self.has_iface = False
print('PT debug: set iface error: no such device')
self.PAUSED = False
self.OFPort = (OFPort, 6633)[OFPort is None]
# openflow packet model connect
#bind_layers(TCP, OpenFlow, sport=self.OFPort)
#bind_layers(TCP, OpenFlow, dport=self.OFPort)
# packet filters
self.filters = None
# user
self.username = user
self.userip = gethostbyname(gethostname())
self.userhost = gethostname()
# CE / EP
self.epConfig = epConfig
self.ceTraffic = list(set([(ep['CE_IP'], ep['CE_PORT']) for ep in self.epConfig]))
self.uid = uuid4()
self.reinitRetries = 0
self.reinitMaxRetries = 4
#
PacketSnifferService.sniffer = self
def master_filter(self, packet):
""" """
packetHead = self.packet_head_parse(packet)
# default filter: exclude CE traffic
if ((packetHead['source']['ip'], str(packetHead['source']['port'])) in self.ceTraffic or
(packetHead['destination']['ip'], str(packetHead['destination']['port'])) in self.ceTraffic):
return False
if not self.filters: return True
filterStatus = True
if self.filters.has_key('-proto'):
pkt = packet
protocols = []
while not isinstance(pkt, NoPayload):
protocols.append(pkt.name)
pkt = pkt.payload
filterStatus = self.filters['-proto'] in protocols
if self.filters.has_key('-mac_src'):
filterStatus = (self.filters['-mac_src'] ==
packetHead['source']['mac'])
if self.filters.has_key('-mac_dst'):
filterStatus = (self.filters['-mac_dst'] ==
packetHead['destination']['mac'])
if self.filters.has_key('-port_src'):
filterStatus = (self.filters['-port_src'] ==
str(packetHead['source']['port']))
if self.filters.has_key('-port_dst'):
filterStatus = (self.filters['-port_dst'] ==
str(packetHead['destination']['port']))
if self.filters.has_key('-ip_src'):
filterStatus = (self.filters['-ip_src'] ==
packetHead['source']['ip'])
if self.filters.has_key('-ip_dst'):
filterStatus = (self.filters['-ip_dst'] ==
packetHead['destination']['ip'])
return filterStatus
def registerCE(self, ce_list):
""" """
print('PT: starting register..')
registered = False
for ce in ce_list:
try:
# Try to connect to CE!
connection = rpycConnect(host=ce[0],
port=int(ce[1]) + 10,
service=PacketSnifferService,
config={
'allow_all_attrs': True,
'allow_pickle': True,
'allow_getattr': True,
'allow_setattr': True,
'allow_delattr': True})
with PacketSnifferService.connectionsLock:
if PacketSnifferService.connections.has_key(connection._config['connid']):
PacketSnifferService.connections[connection._config['connid']].update([('host',
'{}:{}'.format(ce[0], ce[1])), ])
# hello
hello = connection.root.hello('sniffer')
if not hello:
print('PT warning: Could not send hello to central engine {}..'.format(ce))
continue
# authenticate
authenticated = connection.root.login(self.username, 'EP')
if not authenticated:
print('PT warning: Could not authenticate to central engine {}..'.format(ce))
continue
rpycBgServingThread(connection)
# create user if ep is not running
#connection.root.list_eps()
registered = True
self.reinitRetries = 0
print('PT info: Registered to central engine {}..'.format(ce))
except Exception as e:
print('PT warning: Central Engine is down .... [{0}]'.format(e))
if not registered:
if self.reinitRetries < self.reinitMaxRetries:
print('PT debug: no central engines; will retry [{r}] ..'.format(r=self.reinitRetries))
self.reinitRetries += 1
sleep(2)
self.registerCE(ce_list)
else:
raise self.END()
if not registered:
return False
print('PT: register end.')
return True
def packet_head_parse(self, packet):
""" """
source = {}
destination = {}
try:
source['mac'] = packet.fields['src']
destination['mac'] = packet.fields['dst']
try:
source['ip'] = packet.payload.fields['src']
destination['ip'] = packet.payload.fields['dst']
except Exception as e:
source['ip'] = 'None'
destination['ip'] = 'None'
#print('PT debug: packet head exception (ip): {ex}'.format(ex=e))
try:
source['port'] = packet.payload.payload.fields['sport']
destination['port'] = packet.payload.payload.fields['dport']
except Exception as e:
source['port'] = 'None'
destination['port'] = 'None'
#print('PT debug: packet head exception (port): {ex}'.format(ex=e))
except Exception as e:
source['mac'] = 'None'
destination['mac'] = 'None'
#print('PT debug: packet head exception (mac): {ex}'.format(ex=e))
data = {
'protocol': packet.payload.payload.name,
'source': source,
'destination': destination,
}
return data
# BEGIN
@ATMT.state(initial=1)
def BEGIN(self):
""" """
print('|||| BEGIN ||||')
response = self.registerCE(self.ceTraffic)
if not response:
raise self.END()
raise self.WAITING()
# WAITING
@ATMT.state()
def WAITING(self):
""" """
pass
# RECEIVING
@ATMT.receive_condition(WAITING)
def receive_data(self, pkt):
""" """
if self.has_iface is not None and not self.has_iface:
raise self.WAITING()
raise (self.RECEIVING(pkt), self.WAITING())[self.PAUSED]
# RECEIVED
@ATMT.state()
def RECEIVING(self, packet):
""" """
data = {
'sniffer': {
'ip': self.userip,
'hostname': self.userhost,
'username': self.username,
},
'packet_head': self.packet_head_parse(packet),
'packet_source': str(packet),
}
data['packet_head'].update([('id', str(uuid4())), ])
with PacketSnifferService.connectionsLock:
for conn in PacketSnifferService.connections:
if PacketSnifferService.connections[conn]:
try:
response = PacketSnifferService.connections[conn]['root'].run_plugin('PacketSnifferPlugin',
{'command': 'pushpkt',
'data': data})
if (not isinstance(response, dict) or not response.has_key('status') or
not response['status']['success']):
print('PT debug: Push packet error: {}'.format(response))
except Exception as e:
print('PT debug: Push packet error: {}'.format(e))
#pass
raise self.WAITING()
# END
@ATMT.state(final=1)
def END(self):
""" """
print('|||| END ||||')
"""
# EVENTS
@ATMT.ioevent(BEGIN, name='commands')
def transition(self, fd):
print 'in trans'
commands = ['start', 'pause', 'resume', 'restart', 'stop']
command = fd.recv()
if command in commands:
print 'PT debug: got command {cmd}'.format(cmd=command)
if command == 'start':
self.PAUSED = False
elif command == 'pause':
self.PAUSED = True
elif command == 'resume':
self.PAUSED = False
elif command == 'restart':
self.restart()
raise self.WAITING()
elif command == 'stop':
raise self.END()
"""
class PacketSnifferService(rpycService):
""" """
sniffer = None
connections = dict()
connectionsLock = allocate_lock()
def on_connect(self):
""" """
try:
client_addr = self._conn._config['connid']
#client_addr = self._conn._config['endpoints'][1]
with self.connectionsLock:
self.connections.update([(client_addr, {'root': self._conn.root}), ])
print('PT debug: Connected from `{}`.'.format(client_addr))
except Exception as e:
print('PT debug: Connect error: {er}'.format(er=e))
def on_disconnect(self):
""" """
try:
client = None
client_addr = self._conn._config['connid']
#client_addr = self._conn._config['endpoints'][1]
with self.connectionsLock:
client = self.connections.pop(client_addr)
print('PT debug: Disconnected from `{}`.'.format(client_addr))
sleep(2)
if self.sniffer and not self.connections:
print('PT debug: no connections.. trying to re-register..')
self.sniffer.registerCE(self.sniffer.ceTraffic)
elif self.sniffer and client:
print('PT debug: {} connection is down.. trying to re-register..'.format(client))
client = client['host'].split(':')
self.sniffer.registerCE([(client[0], client[1])])
except Exception as e:
print('PT debug: Disconnect error: {er}'.format(er=e))
def exposed_start(self):
""" """
if not self.sniffer:
return False
if not self.sniffer.PAUSED:
return False
self.sniffer.PAUSED = False
print('PT debug: sniffer status chaged: running')
return True
def exposed_pause(self):
""" """
if not self.sniffer:
return False
if self.sniffer.PAUSED:
return False
self.sniffer.PAUSED = True
print('PT debug: sniffer status chaged: paused')
return True
def exposed_resume(self):
""" """
if not self.sniffer:
return False
if not self.sniffer.PAUSED:
return False
self.sniffer.PAUSED = False
print('PT debug: sniffer status chaged: running')
return True
#def exposed_stop(self):
# """ """
# ##
# return True
#def exposed_restart(self):
# """ """
#
# if not self.sniffer:
# return False
#
# self.sniffer.stop()
# sleep(2)
# self.sniffer.runbg()
#
# return True
def exposed_set_filters(self, filters):
""" """
if not self.sniffer:
return False
self.sniffer.filters = deepcopy(filters)
return True
| 23.080709 | 97 | 0.655011 |
from binascii import b2a_base64
from rpyc import Service as rpycService
from rpyc import connect as rpycConnect
from rpyc.utils.helpers import BgServingThread as rpycBgServingThread
from uuid import uuid4
from time import sleep
from copy import deepcopy
from thread import allocate_lock
from scapy.all import Automaton, ATMT, TCP, bind_layers, Packet, NoPayload, Raw
from sys import maxsize
from socket import gethostname, gethostbyname, socket, AF_INET, SOCK_DGRAM, inet_ntoa
from fcntl import ioctl
from struct import unpack, pack
from array import array
def all_interfaces():
is_64bits = maxsize > 2**32
struct_size = 40 if is_64bits else 32
sock = socket(AF_INET, SOCK_DGRAM)
max_possible = 8
while True:
bytes = max_possible * struct_size
names = array('B', '\0' * bytes)
outbytes = unpack('iL', ioctl(
sock.fileno(),
0x8912,
pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split('\0', 1)[0],
inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
class Sniffer(Automaton):
def parse_args(self, user, epConfig, OFPort=None, iface=None):
Automaton.parse_args(self)
self.has_iface = None
if iface:
ifaces = all_interfaces()
for _iface in ifaces:
if iface in _iface:
self.has_iface = True
self.socket_kargs = {'iface': iface, }
if not self.has_iface:
self.has_iface = False
print('PT debug: set iface error: no such device')
self.PAUSED = False
self.OFPort = (OFPort, 6633)[OFPort is None]
self.filters = None
self.username = user
self.userip = gethostbyname(gethostname())
self.userhost = gethostname()
self.epConfig = epConfig
self.ceTraffic = list(set([(ep['CE_IP'], ep['CE_PORT']) for ep in self.epConfig]))
self.uid = uuid4()
self.reinitRetries = 0
self.reinitMaxRetries = 4
PacketSnifferService.sniffer = self
def master_filter(self, packet):
packetHead = self.packet_head_parse(packet)
if ((packetHead['source']['ip'], str(packetHead['source']['port'])) in self.ceTraffic or
(packetHead['destination']['ip'], str(packetHead['destination']['port'])) in self.ceTraffic):
return False
if not self.filters: return True
filterStatus = True
if self.filters.has_key('-proto'):
pkt = packet
protocols = []
while not isinstance(pkt, NoPayload):
protocols.append(pkt.name)
pkt = pkt.payload
filterStatus = self.filters['-proto'] in protocols
if self.filters.has_key('-mac_src'):
filterStatus = (self.filters['-mac_src'] ==
packetHead['source']['mac'])
if self.filters.has_key('-mac_dst'):
filterStatus = (self.filters['-mac_dst'] ==
packetHead['destination']['mac'])
if self.filters.has_key('-port_src'):
filterStatus = (self.filters['-port_src'] ==
str(packetHead['source']['port']))
if self.filters.has_key('-port_dst'):
filterStatus = (self.filters['-port_dst'] ==
str(packetHead['destination']['port']))
if self.filters.has_key('-ip_src'):
filterStatus = (self.filters['-ip_src'] ==
packetHead['source']['ip'])
if self.filters.has_key('-ip_dst'):
filterStatus = (self.filters['-ip_dst'] ==
packetHead['destination']['ip'])
return filterStatus
def registerCE(self, ce_list):
print('PT: starting register..')
registered = False
for ce in ce_list:
try:
connection = rpycConnect(host=ce[0],
port=int(ce[1]) + 10,
service=PacketSnifferService,
config={
'allow_all_attrs': True,
'allow_pickle': True,
'allow_getattr': True,
'allow_setattr': True,
'allow_delattr': True})
with PacketSnifferService.connectionsLock:
if PacketSnifferService.connections.has_key(connection._config['connid']):
PacketSnifferService.connections[connection._config['connid']].update([('host',
'{}:{}'.format(ce[0], ce[1])), ])
hello = connection.root.hello('sniffer')
if not hello:
print('PT warning: Could not send hello to central engine {}..'.format(ce))
continue
authenticated = connection.root.login(self.username, 'EP')
if not authenticated:
print('PT warning: Could not authenticate to central engine {}..'.format(ce))
continue
rpycBgServingThread(connection)
registered = True
self.reinitRetries = 0
print('PT info: Registered to central engine {}..'.format(ce))
except Exception as e:
print('PT warning: Central Engine is down .... [{0}]'.format(e))
if not registered:
if self.reinitRetries < self.reinitMaxRetries:
print('PT debug: no central engines; will retry [{r}] ..'.format(r=self.reinitRetries))
self.reinitRetries += 1
sleep(2)
self.registerCE(ce_list)
else:
raise self.END()
if not registered:
return False
print('PT: register end.')
return True
def packet_head_parse(self, packet):
source = {}
destination = {}
try:
source['mac'] = packet.fields['src']
destination['mac'] = packet.fields['dst']
try:
source['ip'] = packet.payload.fields['src']
destination['ip'] = packet.payload.fields['dst']
except Exception as e:
source['ip'] = 'None'
destination['ip'] = 'None'
try:
source['port'] = packet.payload.payload.fields['sport']
destination['port'] = packet.payload.payload.fields['dport']
except Exception as e:
source['port'] = 'None'
destination['port'] = 'None'
except Exception as e:
source['mac'] = 'None'
destination['mac'] = 'None'
data = {
'protocol': packet.payload.payload.name,
'source': source,
'destination': destination,
}
return data
@ATMT.state(initial=1)
def BEGIN(self):
print('|||| BEGIN ||||')
response = self.registerCE(self.ceTraffic)
if not response:
raise self.END()
raise self.WAITING()
@ATMT.state()
def WAITING(self):
pass
@ATMT.receive_condition(WAITING)
def receive_data(self, pkt):
if self.has_iface is not None and not self.has_iface:
raise self.WAITING()
raise (self.RECEIVING(pkt), self.WAITING())[self.PAUSED]
@ATMT.state()
def RECEIVING(self, packet):
data = {
'sniffer': {
'ip': self.userip,
'hostname': self.userhost,
'username': self.username,
},
'packet_head': self.packet_head_parse(packet),
'packet_source': str(packet),
}
data['packet_head'].update([('id', str(uuid4())), ])
with PacketSnifferService.connectionsLock:
for conn in PacketSnifferService.connections:
if PacketSnifferService.connections[conn]:
try:
response = PacketSnifferService.connections[conn]['root'].run_plugin('PacketSnifferPlugin',
{'command': 'pushpkt',
'data': data})
if (not isinstance(response, dict) or not response.has_key('status') or
not response['status']['success']):
print('PT debug: Push packet error: {}'.format(response))
except Exception as e:
print('PT debug: Push packet error: {}'.format(e))
raise self.WAITING()
@ATMT.state(final=1)
def END(self):
print('|||| END ||||')
class PacketSnifferService(rpycService):
sniffer = None
connections = dict()
connectionsLock = allocate_lock()
def on_connect(self):
try:
client_addr = self._conn._config['connid']
with self.connectionsLock:
self.connections.update([(client_addr, {'root': self._conn.root}), ])
print('PT debug: Connected from `{}`.'.format(client_addr))
except Exception as e:
print('PT debug: Connect error: {er}'.format(er=e))
def on_disconnect(self):
try:
client = None
client_addr = self._conn._config['connid']
with self.connectionsLock:
client = self.connections.pop(client_addr)
print('PT debug: Disconnected from `{}`.'.format(client_addr))
sleep(2)
if self.sniffer and not self.connections:
print('PT debug: no connections.. trying to re-register..')
self.sniffer.registerCE(self.sniffer.ceTraffic)
elif self.sniffer and client:
print('PT debug: {} connection is down.. trying to re-register..'.format(client))
client = client['host'].split(':')
self.sniffer.registerCE([(client[0], client[1])])
except Exception as e:
print('PT debug: Disconnect error: {er}'.format(er=e))
def exposed_start(self):
if not self.sniffer:
return False
if not self.sniffer.PAUSED:
return False
self.sniffer.PAUSED = False
print('PT debug: sniffer status chaged: running')
return True
def exposed_pause(self):
if not self.sniffer:
return False
if self.sniffer.PAUSED:
return False
self.sniffer.PAUSED = True
print('PT debug: sniffer status chaged: paused')
return True
def exposed_resume(self):
if not self.sniffer:
return False
if not self.sniffer.PAUSED:
return False
self.sniffer.PAUSED = False
print('PT debug: sniffer status chaged: running')
return True
def exposed_set_filters(self, filters):
if not self.sniffer:
return False
self.sniffer.filters = deepcopy(filters)
return True
| true | true |
f72863ca7b1b865d798b9801a38bbfb1be6fdca9 | 33 | py | Python | src/estimate/__init__.py | haoyio/stock-opt | 4a7dc730693517fd25ddd5c105361f73b51f11ac | [
"Apache-2.0"
] | null | null | null | src/estimate/__init__.py | haoyio/stock-opt | 4a7dc730693517fd25ddd5c105361f73b51f11ac | [
"Apache-2.0"
] | null | null | null | src/estimate/__init__.py | haoyio/stock-opt | 4a7dc730693517fd25ddd5c105361f73b51f11ac | [
"Apache-2.0"
] | null | null | null | from estimate.constants import *
| 16.5 | 32 | 0.818182 | from estimate.constants import *
| true | true |
f72863e3fb5fd6105a714482bf8ca2e56058339c | 2,328 | py | Python | PyUtilities/OneDConsolidation.py | MingAtUWA/SimpleMPM2 | 7a1d7c257c621123d85a0630e93d42ae25c70fb4 | [
"MIT"
] | null | null | null | PyUtilities/OneDConsolidation.py | MingAtUWA/SimpleMPM2 | 7a1d7c257c621123d85a0630e93d42ae25c70fb4 | [
"MIT"
] | 2 | 2020-10-19T02:03:11.000Z | 2021-03-19T16:34:39.000Z | PyUtilities/OneDConsolidation.py | MingAtUWA/SimpleMPM2 | 7a1d7c257c621123d85a0630e93d42ae25c70fb4 | [
"MIT"
] | 1 | 2020-04-28T00:33:14.000Z | 2020-04-28T00:33:14.000Z | import math
import numpy as np
import matplotlib.pyplot as plt
class OneDConsolidation:
"""
z = 0, free flow boundary condition
z = H, impermeable boundary condition
Parameters:
1. Cv, coefficient of consolidation;
2. Es, one dimensional compressive modulus
3. u0, initial pore pressure;
4. H, depth of soil;
5. error_ratio, used to control the calculation precision.
"""
def __init__(self, Cv, Es, u0, H, error_ratio = 1.0e-3):
self.Cv = Cv
self.Es = Es
self.u0 = u0
self.H = H
# Final settlement
self.dH_final = -H * u0 / Es
self.error_ratio = error_ratio
def calPorePressure(self, t, z):
Tv = self.Cv * t / (self.H * self.H)
p = 0.0
z = z / self.H
i = 0
while True:
M = (2*i+1) * math.pi / 2.0
inc = 2.0/M * math.sin(M*z) * math.exp(-M*M*Tv)
p += inc
i += 1
if abs(inc) < self.error_ratio:
break
if (p > 1.0): p = 1.0
p *= self.u0
return p
def calSettlement(self, t):
Tv = self.Cv * t / (self.H * self.H)
dH = 0.0
i = 0
while True:
M = (2*i+1) * math.pi / 2.0
inc = 2.0/(M*M) * math.exp(-M*M*Tv)
dH += inc
i += 1
if abs(inc) < self.error_ratio:
break
dH = self.dH_final * (1.0 - dH)
return dH
if __name__ == "__main__":
Es = 40.0e6
kv = 1.0e-5
miu = 1.0 # dynamic viscosity
Cv = kv * Es / miu
u0 = 40.0e3
H = 10.0
con_res = OneDConsolidation(Cv, Es, u0, H)
fig = plt.figure()
plot1 = fig.subplots(1, 1)
plot1.set_title('Settlement - Time relation')
plot1.set_xlabel('Time')
plot1.set_ylabel('Settlement')
data_num = 100
t_list = np.zeros(data_num)
p_list = np.zeros(data_num)
u_list = np.zeros(data_num)
for i in range(data_num):
t_list[i] = 0.01 * float(i)
p_list[i] = con_res.calPorePressure(t_list[i], 10.0)
u_list[i] = con_res.calSettlement(t_list[i])
plot1.set_xlim([t_list[0], t_list[data_num-1]])
plot1.plot(t_list, p_list, 'k--')
#plot1.plot(t_list, u_list, 'k--')
plt.show()
| 27.388235 | 66 | 0.514605 | import math
import numpy as np
import matplotlib.pyplot as plt
class OneDConsolidation:
def __init__(self, Cv, Es, u0, H, error_ratio = 1.0e-3):
self.Cv = Cv
self.Es = Es
self.u0 = u0
self.H = H
self.dH_final = -H * u0 / Es
self.error_ratio = error_ratio
def calPorePressure(self, t, z):
Tv = self.Cv * t / (self.H * self.H)
p = 0.0
z = z / self.H
i = 0
while True:
M = (2*i+1) * math.pi / 2.0
inc = 2.0/M * math.sin(M*z) * math.exp(-M*M*Tv)
p += inc
i += 1
if abs(inc) < self.error_ratio:
break
if (p > 1.0): p = 1.0
p *= self.u0
return p
def calSettlement(self, t):
Tv = self.Cv * t / (self.H * self.H)
dH = 0.0
i = 0
while True:
M = (2*i+1) * math.pi / 2.0
inc = 2.0/(M*M) * math.exp(-M*M*Tv)
dH += inc
i += 1
if abs(inc) < self.error_ratio:
break
dH = self.dH_final * (1.0 - dH)
return dH
if __name__ == "__main__":
Es = 40.0e6
kv = 1.0e-5
miu = 1.0
Cv = kv * Es / miu
u0 = 40.0e3
H = 10.0
con_res = OneDConsolidation(Cv, Es, u0, H)
fig = plt.figure()
plot1 = fig.subplots(1, 1)
plot1.set_title('Settlement - Time relation')
plot1.set_xlabel('Time')
plot1.set_ylabel('Settlement')
data_num = 100
t_list = np.zeros(data_num)
p_list = np.zeros(data_num)
u_list = np.zeros(data_num)
for i in range(data_num):
t_list[i] = 0.01 * float(i)
p_list[i] = con_res.calPorePressure(t_list[i], 10.0)
u_list[i] = con_res.calSettlement(t_list[i])
plot1.set_xlim([t_list[0], t_list[data_num-1]])
plot1.plot(t_list, p_list, 'k--')
plt.show()
| true | true |
f7286403498a555edb43f7ec2a30c712369a4942 | 2,522 | py | Python | cinder/cmd/volume.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | cinder/cmd/volume.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | cinder/cmd/volume.py | yanheven/cinder | 89797971f30d547acbf715fea099c52d90966d1f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Cinder Volume."""
import os
import eventlet
from cinder import objects
if os.name == 'nt':
# eventlet monkey patching the os module causes subprocess.Popen to fail
# on Windows when using pipes due to missing non-blocking IO support.
eventlet.monkey_patch(os=False)
else:
eventlet.monkey_patch()
import sys
import warnings
warnings.simplefilter('once', DeprecationWarning)
from oslo_config import cfg
from oslo_log import log as logging
from cinder import i18n
i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config # noqa
from cinder import service
from cinder import utils
from cinder import version
deprecated_host_opt = cfg.DeprecatedOpt('host')
host_opt = cfg.StrOpt('backend_host', help='Backend override of host value.',
deprecated_opts=[deprecated_host_opt])
cfg.CONF.register_cli_opt(host_opt)
CONF = cfg.CONF
def main():
objects.register_all()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
utils.monkey_patch()
launcher = service.get_launcher()
if CONF.enabled_backends:
for backend in CONF.enabled_backends:
CONF.register_opt(host_opt, group=backend)
backend_host = getattr(CONF, backend).backend_host
host = "%s@%s" % (backend_host or CONF.host, backend)
server = service.Service.create(host=host,
service_name=backend,
binary='cinder-volume')
launcher.launch_service(server)
else:
server = service.Service.create(binary='cinder-volume')
launcher.launch_service(server)
launcher.wait()
| 32.333333 | 78 | 0.697859 |
import os
import eventlet
from cinder import objects
if os.name == 'nt':
eventlet.monkey_patch(os=False)
else:
eventlet.monkey_patch()
import sys
import warnings
warnings.simplefilter('once', DeprecationWarning)
from oslo_config import cfg
from oslo_log import log as logging
from cinder import i18n
i18n.enable_lazy()
from cinder.common import config
from cinder import service
from cinder import utils
from cinder import version
deprecated_host_opt = cfg.DeprecatedOpt('host')
host_opt = cfg.StrOpt('backend_host', help='Backend override of host value.',
deprecated_opts=[deprecated_host_opt])
cfg.CONF.register_cli_opt(host_opt)
CONF = cfg.CONF
def main():
objects.register_all()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
utils.monkey_patch()
launcher = service.get_launcher()
if CONF.enabled_backends:
for backend in CONF.enabled_backends:
CONF.register_opt(host_opt, group=backend)
backend_host = getattr(CONF, backend).backend_host
host = "%s@%s" % (backend_host or CONF.host, backend)
server = service.Service.create(host=host,
service_name=backend,
binary='cinder-volume')
launcher.launch_service(server)
else:
server = service.Service.create(binary='cinder-volume')
launcher.launch_service(server)
launcher.wait()
| true | true |
f728642f57feb750b04c0e857b4ecb83e8475772 | 1,299 | py | Python | src/named_entity_recognition/api_ner/google_api_repository.py | brunnurs/proton | 057889e2bcefd2e7e6bc3b0fcdf418a2123767a0 | [
"Apache-2.0"
] | null | null | null | src/named_entity_recognition/api_ner/google_api_repository.py | brunnurs/proton | 057889e2bcefd2e7e6bc3b0fcdf418a2123767a0 | [
"Apache-2.0"
] | null | null | null | src/named_entity_recognition/api_ner/google_api_repository.py | brunnurs/proton | 057889e2bcefd2e7e6bc3b0fcdf418a2123767a0 | [
"Apache-2.0"
] | null | null | null | import requests
import json
import requests
def remote_named_entity_recognition(document, ner_api_secret):
assert ner_api_secret and ner_api_secret != 'PLEASE_ADD_YOUR_OWN_GOOGLE_API_KEY_HERE', "Please add you Google API Key for Named Entity Recognition"
payload = {
"document": {
"type": "PLAIN_TEXT",
"content": document,
"language": "en" # we need to set the language manually, as the google language detection sometimes fails due to e.g. Dutch names
},
"encodingType": 'UTF8'
}
parameters = {
'key': ner_api_secret
}
url = 'https://language.googleapis.com/v1beta2/documents:analyzeEntities'
response = requests.post(url, json=payload, params=parameters)
if response.status_code != 200:
print("ERROR!!! HTTP: {}. for request '{}'".format(response.status_code, document))
print(response.text)
return None
else:
print("HTTP: {}. for request '{}'".format(response.status_code, document))
return json.loads(response.text)
if __name__ == "__main__":
result = remote_named_entity_recognition("Find job id and date of hire for those employees who was hired between November 5th, 2007 and July 5th, 2009.")
if result:
print(result)
| 31.682927 | 157 | 0.666667 | import requests
import json
import requests
def remote_named_entity_recognition(document, ner_api_secret):
assert ner_api_secret and ner_api_secret != 'PLEASE_ADD_YOUR_OWN_GOOGLE_API_KEY_HERE', "Please add you Google API Key for Named Entity Recognition"
payload = {
"document": {
"type": "PLAIN_TEXT",
"content": document,
"language": "en"
},
"encodingType": 'UTF8'
}
parameters = {
'key': ner_api_secret
}
url = 'https://language.googleapis.com/v1beta2/documents:analyzeEntities'
response = requests.post(url, json=payload, params=parameters)
if response.status_code != 200:
print("ERROR!!! HTTP: {}. for request '{}'".format(response.status_code, document))
print(response.text)
return None
else:
print("HTTP: {}. for request '{}'".format(response.status_code, document))
return json.loads(response.text)
if __name__ == "__main__":
result = remote_named_entity_recognition("Find job id and date of hire for those employees who was hired between November 5th, 2007 and July 5th, 2009.")
if result:
print(result)
| true | true |
f728659e152b0ab0ac0b69c92e35428aa12d16bd | 1,546 | py | Python | scripts/practice/FB/DiameterofBinaryTree.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | [
"MIT"
] | null | null | null | scripts/practice/FB/DiameterofBinaryTree.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | [
"MIT"
] | 8 | 2020-09-05T16:04:31.000Z | 2022-02-27T09:57:51.000Z | scripts/practice/FB/DiameterofBinaryTree.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | [
"MIT"
] | null | null | null | """
Given the root of a binary tree, return the length of the diameter of the tree.
The diameter of a binary tree is the length of the longest path between any two nodes in a tree.
This path may or may not pass through the root.
The length of a path between two nodes is represented by the number of edges between them.
Example 1:
Input: root = [1,2,3,4,5]
Output: 3
Explanation: 3 is the length of the path [4,2,1,3] or [5,2,1,3].
Example 2:
Input: root = [1,2]
Output: 1
Constraints:
The number of nodes in the tree is in the range [1, 104].
-100 <= Node.val <= 100
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def diameterOfBinaryTree(self, root):
diameter = 0
def longest_path(node):
if not node:
return 0
nonlocal diameter
# recursively find the longest path in
# both left child and right child
left_path = longest_path(node.left)
right_path = longest_path(node.right)
# update the diameter if left_path plus right_path is larger
diameter = max(diameter, left_path + right_path)
# return the longest one between left_path and right_path;
# remember to add 1 for the path connecting the node and its parent
return max(left_path, right_path) + 1
longest_path(root)
return diameter
| 25.766667 | 97 | 0.641656 |
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def diameterOfBinaryTree(self, root):
diameter = 0
def longest_path(node):
if not node:
return 0
nonlocal diameter
left_path = longest_path(node.left)
right_path = longest_path(node.right)
diameter = max(diameter, left_path + right_path)
return max(left_path, right_path) + 1
longest_path(root)
return diameter
| true | true |
f728661a3070ebaff8acf86cbb8fae3a9b6e04a3 | 548 | py | Python | exercises/fr/exc_01_11.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 2,085 | 2019-04-17T13:10:40.000Z | 2022-03-30T21:51:46.000Z | exercises/fr/exc_01_11.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 79 | 2019-04-18T14:42:55.000Z | 2022-03-07T08:15:43.000Z | exercises/fr/exc_01_11.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 361 | 2019-04-17T13:34:32.000Z | 2022-03-28T04:42:45.000Z | import spacy
# Importe le Matcher
from spacy.____ import ____
nlp = spacy.load("fr_core_news_sm")
doc = nlp("Le constructeur Citröen présente la e-Méhari Courrèges au public.")
# Initialise le matcher avec le vocabulaire partagé
matcher = ____(____.____)
# Crée un motif qui recherche les deux tokens : "e-Méhari" et "Courrèges"
pattern = [____]
# Ajoute le motif au matcher
____.____("MEHARI_PATTERN", None, ____)
# Utilise le matcher sur le doc
matches = ____
print("Résultats :", [doc[start:end].text for match_id, start, end in matches])
| 26.095238 | 79 | 0.74635 | import spacy
from spacy.____ import ____
nlp = spacy.load("fr_core_news_sm")
doc = nlp("Le constructeur Citröen présente la e-Méhari Courrèges au public.")
matcher = ____(____.____)
pattern = [____]
____.____("MEHARI_PATTERN", None, ____)
matches = ____
print("Résultats :", [doc[start:end].text for match_id, start, end in matches])
| true | true |
f7286637c96bb881fbed2a35eadf81bc73e37593 | 284 | py | Python | plugins/trezor/__init__.py | zaapnetwork/electrum-zaap | eb7ce05300508eef9cdb55432ce58c842ce03661 | [
"MIT"
] | null | null | null | plugins/trezor/__init__.py | zaapnetwork/electrum-zaap | eb7ce05300508eef9cdb55432ce58c842ce03661 | [
"MIT"
] | null | null | null | plugins/trezor/__init__.py | zaapnetwork/electrum-zaap | eb7ce05300508eef9cdb55432ce58c842ce03661 | [
"MIT"
] | null | null | null | from electrum_zaap.i18n import _
fullname = 'TREZOR Wallet'
description = _('Provides support for TREZOR hardware wallet')
requires = [('trezorlib','github.com/trezor/python-trezor')]
registers_keystore = ('hardware', 'trezor', _("TREZOR wallet"))
available_for = ['qt', 'cmdline']
| 31.555556 | 63 | 0.735915 | from electrum_zaap.i18n import _
fullname = 'TREZOR Wallet'
description = _('Provides support for TREZOR hardware wallet')
requires = [('trezorlib','github.com/trezor/python-trezor')]
registers_keystore = ('hardware', 'trezor', _("TREZOR wallet"))
available_for = ['qt', 'cmdline']
| true | true |
f7286664ea8b95c1a36f0b84b6f8e95547a6c9ae | 1,154 | py | Python | lightkube/operators.py | addyess/lightkube | 3d2f4ab41bf9daa168e923f3b820d9379d6d56b6 | [
"MIT"
] | 63 | 2020-07-04T14:40:53.000Z | 2022-03-31T07:20:23.000Z | lightkube/operators.py | addyess/lightkube | 3d2f4ab41bf9daa168e923f3b820d9379d6d56b6 | [
"MIT"
] | 15 | 2021-10-13T16:11:04.000Z | 2022-03-28T21:34:50.000Z | lightkube/operators.py | addyess/lightkube | 3d2f4ab41bf9daa168e923f3b820d9379d6d56b6 | [
"MIT"
] | 6 | 2020-10-24T08:42:37.000Z | 2022-03-10T18:11:11.000Z | from typing import Iterable
__all__ = ['in_', 'not_in', 'exists', 'not_exists', 'equal', 'not_equal']
class Operator:
def __init__(self, op_name: str, op: str, value=None):
self.op = op
self.value = value
self.op_name = op_name
def encode(self, key):
return f"{key}{self.op}{self.value}"
class SequenceOperator(Operator):
def encode(self, key):
return f"{key} {self.op} ({','.join(self.value)})"
class BinaryOperator(Operator):
pass
class UnaryOperator(Operator):
def encode(self, key):
return f"{self.op}{key}"
def in_(values: Iterable) -> SequenceOperator:
return SequenceOperator('in_', 'in', sorted(values))
def not_in(values: Iterable) -> SequenceOperator:
return SequenceOperator('not_in', 'notin', sorted(values))
def exists() -> UnaryOperator:
return UnaryOperator('exists', '')
def not_exists() -> UnaryOperator:
return UnaryOperator('not_exists', '!')
def equal(value: str) -> BinaryOperator:
return BinaryOperator('equal', '=', value)
def not_equal(value: str) -> BinaryOperator:
return BinaryOperator('not_equal', '!=', value)
| 21.773585 | 73 | 0.654246 | from typing import Iterable
__all__ = ['in_', 'not_in', 'exists', 'not_exists', 'equal', 'not_equal']
class Operator:
def __init__(self, op_name: str, op: str, value=None):
self.op = op
self.value = value
self.op_name = op_name
def encode(self, key):
return f"{key}{self.op}{self.value}"
class SequenceOperator(Operator):
def encode(self, key):
return f"{key} {self.op} ({','.join(self.value)})"
class BinaryOperator(Operator):
pass
class UnaryOperator(Operator):
def encode(self, key):
return f"{self.op}{key}"
def in_(values: Iterable) -> SequenceOperator:
return SequenceOperator('in_', 'in', sorted(values))
def not_in(values: Iterable) -> SequenceOperator:
return SequenceOperator('not_in', 'notin', sorted(values))
def exists() -> UnaryOperator:
return UnaryOperator('exists', '')
def not_exists() -> UnaryOperator:
return UnaryOperator('not_exists', '!')
def equal(value: str) -> BinaryOperator:
return BinaryOperator('equal', '=', value)
def not_equal(value: str) -> BinaryOperator:
return BinaryOperator('not_equal', '!=', value)
| true | true |
f72866919f41f4941197312cfc63787e81a205c5 | 7,540 | py | Python | ambari-server/src/test/python/TestServerUpgrade.py | fangxingli/mambari | 6da9f6090d4d42623529b73413c8feb8b7f6fe45 | [
"Apache-2.0",
"MIT"
] | null | null | null | ambari-server/src/test/python/TestServerUpgrade.py | fangxingli/mambari | 6da9f6090d4d42623529b73413c8feb8b7f6fe45 | [
"Apache-2.0",
"MIT"
] | null | null | null | ambari-server/src/test/python/TestServerUpgrade.py | fangxingli/mambari | 6da9f6090d4d42623529b73413c8feb8b7f6fe45 | [
"Apache-2.0",
"MIT"
] | null | null | null | '''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import StringIO
import sys
from ambari_commons.exceptions import FatalException
from unittest import TestCase
from mock.mock import patch, MagicMock
from ambari_server.serverUpgrade import set_current, SetCurrentVersionOptions, upgrade_stack
import ambari_server
class TestServerUpgrade(TestCase):
@patch("ambari_server.serverUpgrade.is_server_runing")
@patch('ambari_server.serverUpgrade.SetCurrentVersionOptions.no_finalize_options_set')
@patch('ambari_server.serverUpgrade.get_validated_string_input')
@patch('ambari_server.serverUpgrade.get_ambari_properties')
@patch('ambari_server.serverUtils.get_ambari_server_api_base')
@patch('ambari_commons.logging_utils.get_verbose')
@patch('urllib2.urlopen')
def test_set_current(self, urlopen_mock, get_verbose_mock, get_ambari_server_api_base_mock,
get_ambari_properties_mock, get_validated_string_input_mock,
no_finalize_options_set_mock, is_server_runing_mock):
options = MagicMock()
options.cluster_name = 'cc'
options.desired_repo_version = 'HDP-2.2.2.0-2561'
options.force_repo_version = None
# Case when server is not running
is_server_runing_mock.return_value = False, None
try:
set_current(options)
self.fail("Server is not running - should error out")
except FatalException:
pass # expected
is_server_runing_mock.return_value = True, 11111
# Test insufficient options case
no_finalize_options_set_mock.return_value = True
try:
set_current(options)
self.fail("Should error out")
except FatalException:
pass # expected
no_finalize_options_set_mock.return_value = False
# Test normal flow
get_validated_string_input_mock.return_value = 'dummy_string'
p = get_ambari_properties_mock.return_value
p.get_property.side_effect = ["8080", "false"]
get_ambari_server_api_base_mock.return_value = 'http://127.0.0.1:8080/api/v1/'
get_verbose_mock.retun_value = False
set_current(options)
self.assertTrue(urlopen_mock.called)
request = urlopen_mock.call_args_list[0][0][0]
self.assertEquals(request._Request__original, 'http://127.0.0.1:8080/api/v1/clusters/cc/stack_versions')
self.assertEquals(request.data, '{"ClusterStackVersions": {"state": "CURRENT", "repository_version": "HDP-2.2.2.0-2561", "force": false}}')
self.assertEquals(request.origin_req_host, '127.0.0.1')
self.assertEquals(request.headers, {'X-requested-by': 'ambari', 'Authorization': 'Basic ZHVtbXlfc3RyaW5nOmR1bW15X3N0cmluZw=='})
@patch("ambari_server.serverUpgrade.is_server_runing")
@patch('ambari_server.serverUpgrade.SetCurrentVersionOptions.no_finalize_options_set')
@patch('ambari_server.serverUpgrade.get_validated_string_input')
@patch('ambari_server.serverUpgrade.get_ambari_properties')
@patch('ambari_server.serverUtils.get_ambari_server_api_base')
@patch('ambari_commons.logging_utils.get_verbose')
@patch('urllib2.urlopen')
def test_set_current_with_force(self, urlopen_mock, get_verbose_mock, get_ambari_server_api_base_mock,
get_ambari_properties_mock, get_validated_string_input_mock,
no_finalize_options_set_mock, is_server_runing_mock):
options = MagicMock()
options.cluster_name = 'cc'
options.desired_repo_version = 'HDP-2.2.2.0-2561'
options.force_repo_version = True
# Case when server is not running
is_server_runing_mock.return_value = False, None
try:
set_current(options)
self.fail("Server is not running - should error out")
except FatalException:
pass # expected
is_server_runing_mock.return_value = True, 11111
# Test insufficient options case
no_finalize_options_set_mock.return_value = True
try:
set_current(options)
self.fail("Should error out")
except FatalException:
pass # expected
no_finalize_options_set_mock.return_value = False
# Test normal flow
get_validated_string_input_mock.return_value = 'dummy_string'
p = get_ambari_properties_mock.return_value
p.get_property.side_effect = ["8080", "false"]
get_ambari_server_api_base_mock.return_value = 'http://127.0.0.1:8080/api/v1/'
get_verbose_mock.retun_value = False
set_current(options)
self.assertTrue(urlopen_mock.called)
request = urlopen_mock.call_args_list[0][0][0]
self.assertEquals(request._Request__original, 'http://127.0.0.1:8080/api/v1/clusters/cc/stack_versions')
self.assertEquals(request.data, '{"ClusterStackVersions": {"state": "CURRENT", "repository_version": "HDP-2.2.2.0-2561", "force": true}}')
self.assertEquals(request.origin_req_host, '127.0.0.1')
self.assertEquals(request.headers, {'X-requested-by': 'ambari', 'Authorization': 'Basic ZHVtbXlfc3RyaW5nOmR1bW15X3N0cmluZw=='})
@patch("ambari_server.serverUpgrade.run_os_command")
@patch("ambari_server.serverUpgrade.get_java_exe_path")
@patch("ambari_server.serverConfiguration.get_ambari_properties")
@patch("ambari_server.serverUpgrade.get_ambari_properties")
@patch("ambari_server.serverUpgrade.check_database_name_property")
@patch("ambari_server.serverUpgrade.is_root")
def test_upgrade_stack(self, is_root_mock, c_d_n_p_mock, up_g_a_p_mock, server_g_a_p_mock, java_path_mock, run_os_mock):
run_os_mock.return_value = 0, "", ""
java_path_mock.return_value = ""
is_root_mock.return_value = True
def do_nothing():
pass
c_d_n_p_mock.side_effect = do_nothing
p = ambari_server.properties.Properties()
p._props = {
ambari_server.serverConfiguration.JDBC_DATABASE_PROPERTY: "mysql",
ambari_server.serverConfiguration.JDBC_DATABASE_NAME_PROPERTY: "ambari"
}
up_g_a_p_mock.side_effect = [p, p]
server_g_a_p_mock.side_effect = [p]
args = ["upgrade_stack", "HDP-2.3"]
upgrade_stack(args)
self.assertTrue(run_os_mock.called)
command = run_os_mock.call_args_list[0][0][0]
self.assertTrue("StackUpgradeHelper" in command and "HDP" in command and "2.3" in command)
def testCurrentVersionOptions(self):
# Negative test cases
options = MagicMock()
options.cluster_name = None
options.desired_repo_version = 'HDP-2.2.2.0-2561'
cvo = SetCurrentVersionOptions(options)
self.assertTrue(cvo.no_finalize_options_set())
options = MagicMock()
options.cluster_name = 'cc'
options.desired_repo_version = None
cvo = SetCurrentVersionOptions(options)
self.assertTrue(cvo.no_finalize_options_set())
# Positive test case
options = MagicMock()
options.cluster_name = 'cc'
options.desired_repo_version = 'HDP-2.2.2.0-2561'
cvo = SetCurrentVersionOptions(options)
self.assertFalse(cvo.no_finalize_options_set())
| 39.270833 | 143 | 0.753581 |
import StringIO
import sys
from ambari_commons.exceptions import FatalException
from unittest import TestCase
from mock.mock import patch, MagicMock
from ambari_server.serverUpgrade import set_current, SetCurrentVersionOptions, upgrade_stack
import ambari_server
class TestServerUpgrade(TestCase):
@patch("ambari_server.serverUpgrade.is_server_runing")
@patch('ambari_server.serverUpgrade.SetCurrentVersionOptions.no_finalize_options_set')
@patch('ambari_server.serverUpgrade.get_validated_string_input')
@patch('ambari_server.serverUpgrade.get_ambari_properties')
@patch('ambari_server.serverUtils.get_ambari_server_api_base')
@patch('ambari_commons.logging_utils.get_verbose')
@patch('urllib2.urlopen')
def test_set_current(self, urlopen_mock, get_verbose_mock, get_ambari_server_api_base_mock,
get_ambari_properties_mock, get_validated_string_input_mock,
no_finalize_options_set_mock, is_server_runing_mock):
options = MagicMock()
options.cluster_name = 'cc'
options.desired_repo_version = 'HDP-2.2.2.0-2561'
options.force_repo_version = None
is_server_runing_mock.return_value = False, None
try:
set_current(options)
self.fail("Server is not running - should error out")
except FatalException:
pass
is_server_runing_mock.return_value = True, 11111
no_finalize_options_set_mock.return_value = True
try:
set_current(options)
self.fail("Should error out")
except FatalException:
pass
no_finalize_options_set_mock.return_value = False
get_validated_string_input_mock.return_value = 'dummy_string'
p = get_ambari_properties_mock.return_value
p.get_property.side_effect = ["8080", "false"]
get_ambari_server_api_base_mock.return_value = 'http://127.0.0.1:8080/api/v1/'
get_verbose_mock.retun_value = False
set_current(options)
self.assertTrue(urlopen_mock.called)
request = urlopen_mock.call_args_list[0][0][0]
self.assertEquals(request._Request__original, 'http://127.0.0.1:8080/api/v1/clusters/cc/stack_versions')
self.assertEquals(request.data, '{"ClusterStackVersions": {"state": "CURRENT", "repository_version": "HDP-2.2.2.0-2561", "force": false}}')
self.assertEquals(request.origin_req_host, '127.0.0.1')
self.assertEquals(request.headers, {'X-requested-by': 'ambari', 'Authorization': 'Basic ZHVtbXlfc3RyaW5nOmR1bW15X3N0cmluZw=='})
@patch("ambari_server.serverUpgrade.is_server_runing")
@patch('ambari_server.serverUpgrade.SetCurrentVersionOptions.no_finalize_options_set')
@patch('ambari_server.serverUpgrade.get_validated_string_input')
@patch('ambari_server.serverUpgrade.get_ambari_properties')
@patch('ambari_server.serverUtils.get_ambari_server_api_base')
@patch('ambari_commons.logging_utils.get_verbose')
@patch('urllib2.urlopen')
def test_set_current_with_force(self, urlopen_mock, get_verbose_mock, get_ambari_server_api_base_mock,
get_ambari_properties_mock, get_validated_string_input_mock,
no_finalize_options_set_mock, is_server_runing_mock):
options = MagicMock()
options.cluster_name = 'cc'
options.desired_repo_version = 'HDP-2.2.2.0-2561'
options.force_repo_version = True
is_server_runing_mock.return_value = False, None
try:
set_current(options)
self.fail("Server is not running - should error out")
except FatalException:
pass
is_server_runing_mock.return_value = True, 11111
no_finalize_options_set_mock.return_value = True
try:
set_current(options)
self.fail("Should error out")
except FatalException:
pass
no_finalize_options_set_mock.return_value = False
get_validated_string_input_mock.return_value = 'dummy_string'
p = get_ambari_properties_mock.return_value
p.get_property.side_effect = ["8080", "false"]
get_ambari_server_api_base_mock.return_value = 'http://127.0.0.1:8080/api/v1/'
get_verbose_mock.retun_value = False
set_current(options)
self.assertTrue(urlopen_mock.called)
request = urlopen_mock.call_args_list[0][0][0]
self.assertEquals(request._Request__original, 'http://127.0.0.1:8080/api/v1/clusters/cc/stack_versions')
self.assertEquals(request.data, '{"ClusterStackVersions": {"state": "CURRENT", "repository_version": "HDP-2.2.2.0-2561", "force": true}}')
self.assertEquals(request.origin_req_host, '127.0.0.1')
self.assertEquals(request.headers, {'X-requested-by': 'ambari', 'Authorization': 'Basic ZHVtbXlfc3RyaW5nOmR1bW15X3N0cmluZw=='})
@patch("ambari_server.serverUpgrade.run_os_command")
@patch("ambari_server.serverUpgrade.get_java_exe_path")
@patch("ambari_server.serverConfiguration.get_ambari_properties")
@patch("ambari_server.serverUpgrade.get_ambari_properties")
@patch("ambari_server.serverUpgrade.check_database_name_property")
@patch("ambari_server.serverUpgrade.is_root")
def test_upgrade_stack(self, is_root_mock, c_d_n_p_mock, up_g_a_p_mock, server_g_a_p_mock, java_path_mock, run_os_mock):
run_os_mock.return_value = 0, "", ""
java_path_mock.return_value = ""
is_root_mock.return_value = True
def do_nothing():
pass
c_d_n_p_mock.side_effect = do_nothing
p = ambari_server.properties.Properties()
p._props = {
ambari_server.serverConfiguration.JDBC_DATABASE_PROPERTY: "mysql",
ambari_server.serverConfiguration.JDBC_DATABASE_NAME_PROPERTY: "ambari"
}
up_g_a_p_mock.side_effect = [p, p]
server_g_a_p_mock.side_effect = [p]
args = ["upgrade_stack", "HDP-2.3"]
upgrade_stack(args)
self.assertTrue(run_os_mock.called)
command = run_os_mock.call_args_list[0][0][0]
self.assertTrue("StackUpgradeHelper" in command and "HDP" in command and "2.3" in command)
def testCurrentVersionOptions(self):
options = MagicMock()
options.cluster_name = None
options.desired_repo_version = 'HDP-2.2.2.0-2561'
cvo = SetCurrentVersionOptions(options)
self.assertTrue(cvo.no_finalize_options_set())
options = MagicMock()
options.cluster_name = 'cc'
options.desired_repo_version = None
cvo = SetCurrentVersionOptions(options)
self.assertTrue(cvo.no_finalize_options_set())
options = MagicMock()
options.cluster_name = 'cc'
options.desired_repo_version = 'HDP-2.2.2.0-2561'
cvo = SetCurrentVersionOptions(options)
self.assertFalse(cvo.no_finalize_options_set())
| true | true |
f72866b84e4459ca6c2400a13d478a5eceb3c0df | 6,785 | py | Python | AR/exp/common/ntu_tools.py | YuJaceKim/Activity-Recognition-with-Combination-of-Deeply-Learned-Visual-Attention-and-Pose-Estimation | 23b9191f150d0edb981cf22a47a618feb55578b9 | [
"MIT"
] | 343 | 2018-07-18T10:39:30.000Z | 2022-03-30T02:32:06.000Z | AR/exp/common/ntu_tools.py | YuJaceKim/Activity-Recognition-with-Combination-of-Deeply-Learned-Visual-Attention-and-Pose-Estimation | 23b9191f150d0edb981cf22a47a618feb55578b9 | [
"MIT"
] | 47 | 2018-09-03T03:35:13.000Z | 2021-11-15T02:09:15.000Z | AR/exp/common/ntu_tools.py | YuJaceKim/Activity-Recognition-with-Combination-of-Deeply-Learned-Visual-Attention-and-Pose-Estimation | 23b9191f150d0edb981cf22a47a618feb55578b9 | [
"MIT"
] | 83 | 2018-10-15T08:36:12.000Z | 2022-03-05T05:51:16.000Z | import os
import numpy as np
import json
import time
from keras.callbacks import Callback
from deephar.data import BatchLoader
from deephar.utils import *
def eval_singleclip_gt_bbox_generator(model, datagen, verbose=1):
num_blocks = len(model.outputs)
num_samples = len(datagen)
start = time.time()
for i in range(num_samples):
if verbose > 1:
printcn('', 'pred %05d/%05d' % (i+1, num_samples))
[x], [y] = datagen[i]
if 'y_true' not in locals():
y_true = np.zeros((num_samples,) + y.shape[1:])
y_pred = np.zeros((num_samples, num_blocks) + y.shape[1:])
y_true[i, :] = y
pred = model.predict(x)
for b in range(num_blocks):
y_pred[i, b, :] = pred[b]
dt = time.time() - start
if verbose:
printc(WARNING, 'NTU, single-clip, GT bbox, action acc.%:')
scores = []
for b in range(num_blocks):
correct = np.equal(np.argmax(y_true, axis=-1),
np.argmax(y_pred[:, b, :], axis=-1), dtype=np.float)
scores.append(sum(correct) / len(correct))
if verbose:
printc(WARNING, ' %.1f ' % (100*scores[-1]))
if verbose:
printcn('', '\n%d samples in %.1f sec: %.1f clips per sec' \
% (num_samples, dt, num_samples / dt))
return scores
def eval_multiclip_dataset(model, ntu, subsampling, bboxes_file=None,
logdir=None, verbose=1):
"""If bboxes_file if not given, use ground truth bounding boxes."""
num_samples = ntu.get_length(TEST_MODE)
num_blocks = len(model.outputs)
"""Save and reset some original configs from the dataset."""
org_hflip = ntu.dataconf.fixed_hflip
org_use_gt_bbox = ntu.use_gt_bbox
cnt_corr = 0
cnt_total = 0
action_shape = (num_samples,) + ntu.get_shape('ntuaction')
a_true = np.zeros(action_shape)
a_pred = np.ones((num_blocks,) + action_shape)
missing_clips = {}
if bboxes_file is not None:
with open(bboxes_file, 'r') as fid:
bboxes_data = json.load(fid)
ntu.use_gt_bbox = False
bboxes_info = 'Using bounding boxes from file "{}"'.format(bboxes_file)
else:
bboxes_data = None
ntu.use_gt_bbox = True
bboxes_info = 'Using ground truth bounding boxes.'
for i in range(num_samples):
if verbose:
printc(OKBLUE, '%04d/%04d\t' % (i, num_samples))
frame_list = ntu.get_clip_index(i, TEST_MODE, subsamples=[subsampling])
"""Variable to hold all preditions for this sequence.
2x frame_list due to hflip.
"""
allpred = np.ones((num_blocks, 2*len(frame_list)) + action_shape[1:])
for f in range(len(frame_list)):
for hflip in range(2):
preds_clip = []
try:
ntu.dataconf.fixed_hflip = hflip # Force horizontal flip
bbox = None
if bboxes_data is not None:
key = '%04d.%d.%03d.%d' % (i, subsampling, f, hflip)
try:
bbox = np.array(bboxes_data[key])
except:
warning('Missing bounding box key ' + str(key))
"""Load clip and predict action."""
data = ntu.get_data(i, TEST_MODE, frame_list=frame_list[f],
bbox=bbox)
a_true[i, :] = data['ntuaction']
pred = model.predict(np.expand_dims(data['frame'], axis=0))
for b in range(num_blocks):
allpred[b, 2*f+hflip, :] = pred[b][0]
a_pred[b, i, :] *= pred[b][0]
if np.argmax(a_true[i]) != np.argmax(a_pred[-1, i]):
missing_clips['%04d.%03d.%d' % (i, f, hflip)] = [
int(np.argmax(a_true[i])),
int(np.argmax(a_pred[-1, i]))]
except Exception as e:
warning('eval_multiclip, exception on sample ' \
+ str(i) + ' frame ' + str(f) + ': ' + str(e))
if verbose:
cor = int(np.argmax(a_true[i]) == np.argmax(a_pred[-1, i]))
cnt_total += 1
cnt_corr += cor
printnl('%d : %.1f' % (cor, 100 * cnt_corr / cnt_total))
if logdir is not None:
np.save('%s/a_pred.npy' % logdir, a_pred)
np.save('%s/a_true.npy' % logdir, a_true)
with open(os.path.join(logdir, 'missing-clips.json'), 'w') as fid:
json.dump(missing_clips, fid)
a_true = np.expand_dims(a_true, axis=0)
a_true = np.tile(a_true, (num_blocks, 1, 1))
correct = np.argmax(a_true, axis=-1) == np.argmax(a_pred, axis=-1)
scores = 100*np.sum(correct, axis=-1) / num_samples
if verbose:
printcn(WARNING, 'NTU, multi-clip. ' + bboxes_info + '\n')
printcn(WARNING, np.array2string(np.array(scores), precision=2))
printcn(WARNING, 'NTU best: %.2f' % max(scores))
ntu.dataconf.fixed_hflip = org_hflip
ntu.use_gt_bbox = org_use_gt_bbox
return scores
class NtuEvalCallback(Callback):
def __init__(self, data, eval_model=None, logdir=None):
assert type(data) == BatchLoader, \
'data must be a BatchLoader instance, ' \
+ 'got {} instead'.format(data)
self.data = data
self.eval_model = eval_model
self.scores = {}
self.logdir = logdir
def on_epoch_end(self, epoch, logs={}):
if self.eval_model is not None:
model = self.eval_model
else:
model = self.model
scores = eval_singleclip_gt_bbox_generator(model, self.data)
epoch += 1
if self.logdir is not None:
if not hasattr(self, 'logarray'):
self.logarray = {}
self.logarray[epoch] = scores
with open(os.path.join(self.logdir, 'ntu_val.json'), 'w') as f:
json.dump(self.logarray, f)
cur_best = max(scores)
self.scores[epoch] = cur_best
printcn(OKBLUE, 'Best score is %.1f at epoch %d' % \
(100*self.best_score, self.best_epoch))
@property
def best_epoch(self):
if len(self.scores) > 0:
# Get the key of the maximum value from a dict
return max(self.scores, key=self.scores.get)
else:
return np.inf
@property
def best_score(self):
if len(self.scores) > 0:
# Get the maximum value from a dict
return self.scores[self.best_epoch]
else:
return 0
# Aliases.
eval_singleclip_generator = eval_singleclip_gt_bbox_generator
| 32.777778 | 79 | 0.550479 | import os
import numpy as np
import json
import time
from keras.callbacks import Callback
from deephar.data import BatchLoader
from deephar.utils import *
def eval_singleclip_gt_bbox_generator(model, datagen, verbose=1):
num_blocks = len(model.outputs)
num_samples = len(datagen)
start = time.time()
for i in range(num_samples):
if verbose > 1:
printcn('', 'pred %05d/%05d' % (i+1, num_samples))
[x], [y] = datagen[i]
if 'y_true' not in locals():
y_true = np.zeros((num_samples,) + y.shape[1:])
y_pred = np.zeros((num_samples, num_blocks) + y.shape[1:])
y_true[i, :] = y
pred = model.predict(x)
for b in range(num_blocks):
y_pred[i, b, :] = pred[b]
dt = time.time() - start
if verbose:
printc(WARNING, 'NTU, single-clip, GT bbox, action acc.%:')
scores = []
for b in range(num_blocks):
correct = np.equal(np.argmax(y_true, axis=-1),
np.argmax(y_pred[:, b, :], axis=-1), dtype=np.float)
scores.append(sum(correct) / len(correct))
if verbose:
printc(WARNING, ' %.1f ' % (100*scores[-1]))
if verbose:
printcn('', '\n%d samples in %.1f sec: %.1f clips per sec' \
% (num_samples, dt, num_samples / dt))
return scores
def eval_multiclip_dataset(model, ntu, subsampling, bboxes_file=None,
logdir=None, verbose=1):
num_samples = ntu.get_length(TEST_MODE)
num_blocks = len(model.outputs)
org_hflip = ntu.dataconf.fixed_hflip
org_use_gt_bbox = ntu.use_gt_bbox
cnt_corr = 0
cnt_total = 0
action_shape = (num_samples,) + ntu.get_shape('ntuaction')
a_true = np.zeros(action_shape)
a_pred = np.ones((num_blocks,) + action_shape)
missing_clips = {}
if bboxes_file is not None:
with open(bboxes_file, 'r') as fid:
bboxes_data = json.load(fid)
ntu.use_gt_bbox = False
bboxes_info = 'Using bounding boxes from file "{}"'.format(bboxes_file)
else:
bboxes_data = None
ntu.use_gt_bbox = True
bboxes_info = 'Using ground truth bounding boxes.'
for i in range(num_samples):
if verbose:
printc(OKBLUE, '%04d/%04d\t' % (i, num_samples))
frame_list = ntu.get_clip_index(i, TEST_MODE, subsamples=[subsampling])
allpred = np.ones((num_blocks, 2*len(frame_list)) + action_shape[1:])
for f in range(len(frame_list)):
for hflip in range(2):
preds_clip = []
try:
ntu.dataconf.fixed_hflip = hflip
bbox = None
if bboxes_data is not None:
key = '%04d.%d.%03d.%d' % (i, subsampling, f, hflip)
try:
bbox = np.array(bboxes_data[key])
except:
warning('Missing bounding box key ' + str(key))
data = ntu.get_data(i, TEST_MODE, frame_list=frame_list[f],
bbox=bbox)
a_true[i, :] = data['ntuaction']
pred = model.predict(np.expand_dims(data['frame'], axis=0))
for b in range(num_blocks):
allpred[b, 2*f+hflip, :] = pred[b][0]
a_pred[b, i, :] *= pred[b][0]
if np.argmax(a_true[i]) != np.argmax(a_pred[-1, i]):
missing_clips['%04d.%03d.%d' % (i, f, hflip)] = [
int(np.argmax(a_true[i])),
int(np.argmax(a_pred[-1, i]))]
except Exception as e:
warning('eval_multiclip, exception on sample ' \
+ str(i) + ' frame ' + str(f) + ': ' + str(e))
if verbose:
cor = int(np.argmax(a_true[i]) == np.argmax(a_pred[-1, i]))
cnt_total += 1
cnt_corr += cor
printnl('%d : %.1f' % (cor, 100 * cnt_corr / cnt_total))
if logdir is not None:
np.save('%s/a_pred.npy' % logdir, a_pred)
np.save('%s/a_true.npy' % logdir, a_true)
with open(os.path.join(logdir, 'missing-clips.json'), 'w') as fid:
json.dump(missing_clips, fid)
a_true = np.expand_dims(a_true, axis=0)
a_true = np.tile(a_true, (num_blocks, 1, 1))
correct = np.argmax(a_true, axis=-1) == np.argmax(a_pred, axis=-1)
scores = 100*np.sum(correct, axis=-1) / num_samples
if verbose:
printcn(WARNING, 'NTU, multi-clip. ' + bboxes_info + '\n')
printcn(WARNING, np.array2string(np.array(scores), precision=2))
printcn(WARNING, 'NTU best: %.2f' % max(scores))
ntu.dataconf.fixed_hflip = org_hflip
ntu.use_gt_bbox = org_use_gt_bbox
return scores
class NtuEvalCallback(Callback):
def __init__(self, data, eval_model=None, logdir=None):
assert type(data) == BatchLoader, \
'data must be a BatchLoader instance, ' \
+ 'got {} instead'.format(data)
self.data = data
self.eval_model = eval_model
self.scores = {}
self.logdir = logdir
def on_epoch_end(self, epoch, logs={}):
if self.eval_model is not None:
model = self.eval_model
else:
model = self.model
scores = eval_singleclip_gt_bbox_generator(model, self.data)
epoch += 1
if self.logdir is not None:
if not hasattr(self, 'logarray'):
self.logarray = {}
self.logarray[epoch] = scores
with open(os.path.join(self.logdir, 'ntu_val.json'), 'w') as f:
json.dump(self.logarray, f)
cur_best = max(scores)
self.scores[epoch] = cur_best
printcn(OKBLUE, 'Best score is %.1f at epoch %d' % \
(100*self.best_score, self.best_epoch))
@property
def best_epoch(self):
if len(self.scores) > 0:
return max(self.scores, key=self.scores.get)
else:
return np.inf
@property
def best_score(self):
if len(self.scores) > 0:
return self.scores[self.best_epoch]
else:
return 0
eval_singleclip_generator = eval_singleclip_gt_bbox_generator
| true | true |
f7286704124989985083256db66a9323c4428a17 | 9,820 | py | Python | src/ocr_recog/ocr_recognizer.py | deepguider/RoadGPS | 7db4669a54da98a854886b89b6922fb8c7a60f33 | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2019-05-22T12:47:34.000Z | 2019-05-23T15:43:47.000Z | src/ocr_recog/ocr_recognizer.py | deepguider/RoadGPS | 7db4669a54da98a854886b89b6922fb8c7a60f33 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/ocr_recog/ocr_recognizer.py | deepguider/RoadGPS | 7db4669a54da98a854886b89b6922fb8c7a60f33 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2019-08-09T06:50:46.000Z | 2019-08-09T06:50:46.000Z | import os
os.environ["CUDA_VISIBLE_DEVICES"]="0" #CUDA_VISIBLE_DEVICES=0 (always use the first GPU only)
import time
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
from utils import AttnLabelConverter
from model import Model
from demo import detect_ocr
from craft.craft import CRAFT
from collections import OrderedDict
#####################################
# 21.06.04 Astrid
# https://github.com/googleapis/oauth2client/issues/642#issuecomment-279643203
'''
Solving this error
File "./../src/ocr_recog/ocr_recognizer.py", line 41, in __init__
self.opt_craft, self.opt_recog = self.setup_parser()
File "./../src/ocr_recog/ocr_recognizer.py", line 120, in setup_parser
parser_craft = argparse.ArgumentParser(description='CRAFT Text Detection')
File "/usr/lib/python3.6/argparse.py", line 1635, in __init__
prog = _os.path.basename(_sys.argv[0])
AttributeError: module 'sys' has no attribute 'argv'
'''
import sys
if not hasattr(sys, 'argv'):
sys.argv = ['']
#####################################
def str2bool(v):
return v.lower() in ("yes", "y", "true", "t", "1")
def copyStateDict(state_dict):
if list(state_dict.keys())[0].startswith("module"):
start_idx = 1
else:
start_idx = 0
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = ".".join(k.split(".")[start_idx:])
new_state_dict[name] = v
return new_state_dict
class OCRRecognizer:
def __init__(self):
self.net = None #detect
self.model = None #recog
self.converter = None
#self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.res_imagefileName = None
self.opt_craft, self.opt_recog = self.setup_parser()
self.args_craft= vars(self.opt_craft)
self.args = vars(self.opt_recog)
self.detect_time = 0.0
self.recog_time = 0.0
self.total_time =0.0
# print("~~~~~~~~ Hyperparameters used: ~~~~~~~")
# for x, y in self.args.items():
# print("{} : {}".format(x, y))
self.__dict__.update(self.args_craft)
self.__dict__.update(self.args)
def initialize(self):
start = time.time()
# self.saved_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train_addKorean_synth/best_accuracy.pth'
# self.craft_trained_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train/craft_mlt_25k.pth'
# self.saved_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train_v2/best_accuracy.pth'
# self.craft_trained_model = '/home_hongdo/sungeun.kim/checkpoints/ocr/ocr_train_v2/best_accuracy_craft.pth'
#
# official
self.saved_model = './data_ocr/best_accuracy.pth'
self.craft_trained_model = './data_ocr/best_craft.pth'
self.logfilepath = './data_ocr/log_ocr_result.txt'
if torch.cuda.is_available():
self.device = torch.device('cuda')
self.cuda = True
cudnn.benchmark = False
else:
self.device = torch.device('cpu')
self.cuda = False
cudnn.benchmark = True
""" vocab / character number configuration """
# if self.sensitive:
# self.character = string.printable[:-6] # same with ASTER setting (use 94 char).
cudnn.deterministic = True
#self.num_gpu = torch.cuda.device_count()
""" model configuration """
# detetion
self.net = CRAFT(self).to(self.device) # initialize
print('Loading detection weights from checkpoint ' + self.craft_trained_model)
self.net.load_state_dict(copyStateDict(torch.load(self.craft_trained_model, map_location=self.device)))
#self.net = torch.nn.DataParallel(self.net).to(self.device)
self.net.to(self.device)
self.converter = AttnLabelConverter(self.character)
self.num_class = len(self.converter.character)
if self.rgb:
self.input_channel = 3
self.model = Model(self, self.num_class).to(self.device)
# load model
#self.model = torch.nn.DataParallel(self.model).to(self.device)
print('Loading recognition weights from checkpoint %s' % self.saved_model)
#ckpt = torch.load(self.saved_model, map_location=self.device)
self.model.load_state_dict(torch.load(self.saved_model, map_location=self.device))
self.model.to(self.device)
print('Initialization Done! It tooks {:.2f} sec.\n'.format(time.time() - start))
return True
def setup_parser(self):
"""
Sets up an argument parser
"""
parser_craft = argparse.ArgumentParser(description='CRAFT Text Detection')
parser_craft.add_argument('--craft_trained_model', default='weights/craft_mlt_25k.pth', type=str,
help='pretrained model')
parser_craft.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')
parser_craft.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')
parser_craft.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')
parser_craft.add_argument('--cuda', default=False, type=str2bool, help='Use cuda for inference')
parser_craft.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')
parser_craft.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')
parser_craft.add_argument('--poly', default=False, action='store_true', help='enable polygon type')
parser_craft.add_argument('--show_time', default=False, action='store_true', help='show processing time')
parser_craft.add_argument('--test_folder', default='/data/', type=str, help='folder path to input images')
parser_craft.add_argument('--result_folder', default='./results/', type=str, help='result folder path')
parser_craft.add_argument('--refine', default=False, action='store_true', help='enable link refiner')
parser_craft.add_argument('--refiner_model', default='weights/craft_refiner_CTW1500.pth', type=str,
help='pretrained refiner model')
args_craft = parser_craft.parse_args()
parser_recog = argparse.ArgumentParser(description='ocr recognition')
parser_recog.add_argument('--image_path', help='path to image_folder or image_file which contains text images')
parser_recog.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser_recog.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser_recog.add_argument('--saved_model', help="path to saved_model to evaluation")
parser_recog.add_argument('--logfilepath', help="path to log to demo")
""" Data processing """
parser_recog.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser_recog.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser_recog.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser_recog.add_argument('--rgb', action='store_true', help='use rgb input')
# parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser_recog.add_argument('--character', type=str,
default='0123456789abcdefghijklmnopqrstuvwxyz가각간갇갈감갑값갓강갖같갚갛개객걀걔거걱건걷걸검겁것겉게겨격겪견결겹경곁계고곡곤곧골곰곱곳공과관광괜괴굉교구국군굳굴굵굶굽궁권귀귓규균귤그극근글긁금급긋긍기긴길김깅깊까깍깎깐깔깜깝깡깥깨꺼꺾껌껍껏껑께껴꼬꼭꼴꼼꼽꽂꽃꽉꽤꾸꾼꿀꿈뀌끄끈끊끌끓끔끗끝끼낌나낙낚난날낡남납낫낭낮낯낱낳내냄냇냉냐냥너넉넌널넓넘넣네넥넷녀녁년념녕노녹논놀놈농높놓놔뇌뇨누눈눕뉘뉴늄느늑는늘늙능늦늬니닐님다닥닦단닫달닭닮담답닷당닿대댁댐댓더덕던덜덟덤덥덧덩덮데델도독돈돌돕돗동돼되된두둑둘둠둡둥뒤뒷드득든듣들듬듭듯등디딩딪따딱딴딸땀땅때땜떠떡떤떨떻떼또똑뚜뚫뚱뛰뜨뜩뜯뜰뜻띄라락란람랍랑랗래랜램랫략량러럭런럴럼럽럿렁렇레렉렌려력련렬렵령례로록론롬롭롯료루룩룹룻뤄류륙률륭르른름릇릎리릭린림립릿링마막만많말맑맘맙맛망맞맡맣매맥맨맵맺머먹먼멀멈멋멍멎메멘멩며면멸명몇모목몬몰몸몹못몽묘무묵묶문묻물뭄뭇뭐뭘뭣므미민믿밀밉밌및밑바박밖반받발밝밟밤밥방밭배백뱀뱃뱉버번벌범법벗베벤벨벼벽변별볍병볕보복볶본볼봄봇봉뵈뵙부북분불붉붐붓붕붙뷰브븐블비빌빔빗빚빛빠빡빨빵빼뺏뺨뻐뻔뻗뼈뼉뽑뿌뿐쁘쁨사삭산살삶삼삿상새색샌생샤서석섞선설섬섭섯성세섹센셈셋셔션소속손솔솜솟송솥쇄쇠쇼수숙순숟술숨숫숭숲쉬쉰쉽슈스슨슬슴습슷승시식신싣실싫심십싯싱싶싸싹싼쌀쌍쌓써썩썰썹쎄쏘쏟쑤쓰쓴쓸씀씌씨씩씬씹씻아악안앉않알앓암압앗앙앞애액앨야약얀얄얇양얕얗얘어억언얹얻얼엄업없엇엉엊엌엎에엔엘여역연열엷염엽엿영옆예옛오옥온올옮옳옷옹와완왕왜왠외왼요욕용우욱운울움웃웅워원월웨웬위윗유육율으윽은을음응의이익인일읽잃임입잇있잊잎자작잔잖잘잠잡잣장잦재쟁쟤저적전절젊점접젓정젖제젠젯져조족존졸좀좁종좋좌죄주죽준줄줌줍중쥐즈즉즌즐즘증지직진질짐집짓징짙짚짜짝짧째쨌쩌쩍쩐쩔쩜쪽쫓쭈쭉찌찍찢차착찬찮찰참찻창찾채책챔챙처척천철첩첫청체쳐초촉촌촛총촬최추축춘출춤춥춧충취츠측츰층치칙친칠침칫칭카칸칼캄캐캠커컨컬컴컵컷케켓켜코콘콜콤콩쾌쿄쿠퀴크큰클큼키킬타탁탄탈탑탓탕태택탤터턱턴털텅테텍텔템토톤톨톱통퇴투툴툼퉁튀튜트특튼튿틀틈티틱팀팅파팎판팔팝패팩팬퍼퍽페펜펴편펼평폐포폭폰표푸푹풀품풍퓨프플픔피픽필핏핑하학한할함합항해핵핸햄햇행향허헌험헤헬혀현혈협형혜호혹혼홀홈홉홍화확환활황회획횟횡효후훈훌훔훨휘휴흉흐흑흔흘흙흡흥흩희흰히힘',
help='character label')
parser_recog.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser_recog.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
""" Model Architecture """
parser_recog.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser_recog.add_argument('--input_channel', type=int, default=1,
help='the number of input channel of Feature extractor')
parser_recog.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser_recog.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
args_recog= parser_recog.parse_args()
return args_craft , args_recog
def apply(self, image, timestamp, save_img=False):
#coordinate : list
save_log = False
pred, timestamp = detect_ocr(self, image, timestamp, save_img, save_log)
return pred, timestamp | 49.847716 | 1,047 | 0.694297 | import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import time
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
from utils import AttnLabelConverter
from model import Model
from demo import detect_ocr
from craft.craft import CRAFT
from collections import OrderedDict
lf.device = torch.device('cuda')
self.cuda = True
cudnn.benchmark = False
else:
self.device = torch.device('cpu')
self.cuda = False
cudnn.benchmark = True
self.net = CRAFT(self).to(self.device)
print('Loading detection weights from checkpoint ' + self.craft_trained_model)
self.net.load_state_dict(copyStateDict(torch.load(self.craft_trained_model, map_location=self.device)))
self.net.to(self.device)
self.converter = AttnLabelConverter(self.character)
self.num_class = len(self.converter.character)
if self.rgb:
self.input_channel = 3
self.model = Model(self, self.num_class).to(self.device)
print('Loading recognition weights from checkpoint %s' % self.saved_model)
self.model.load_state_dict(torch.load(self.saved_model, map_location=self.device))
self.model.to(self.device)
print('Initialization Done! It tooks {:.2f} sec.\n'.format(time.time() - start))
return True
def setup_parser(self):
parser_craft = argparse.ArgumentParser(description='CRAFT Text Detection')
parser_craft.add_argument('--craft_trained_model', default='weights/craft_mlt_25k.pth', type=str,
help='pretrained model')
parser_craft.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')
parser_craft.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')
parser_craft.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')
parser_craft.add_argument('--cuda', default=False, type=str2bool, help='Use cuda for inference')
parser_craft.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')
parser_craft.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')
parser_craft.add_argument('--poly', default=False, action='store_true', help='enable polygon type')
parser_craft.add_argument('--show_time', default=False, action='store_true', help='show processing time')
parser_craft.add_argument('--test_folder', default='/data/', type=str, help='folder path to input images')
parser_craft.add_argument('--result_folder', default='./results/', type=str, help='result folder path')
parser_craft.add_argument('--refine', default=False, action='store_true', help='enable link refiner')
parser_craft.add_argument('--refiner_model', default='weights/craft_refiner_CTW1500.pth', type=str,
help='pretrained refiner model')
args_craft = parser_craft.parse_args()
parser_recog = argparse.ArgumentParser(description='ocr recognition')
parser_recog.add_argument('--image_path', help='path to image_folder or image_file which contains text images')
parser_recog.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser_recog.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser_recog.add_argument('--saved_model', help="path to saved_model to evaluation")
parser_recog.add_argument('--logfilepath', help="path to log to demo")
parser_recog.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser_recog.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser_recog.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser_recog.add_argument('--rgb', action='store_true', help='use rgb input')
parser_recog.add_argument('--character', type=str,
default='0123456789abcdefghijklmnopqrstuvwxyz가각간갇갈감갑값갓강갖같갚갛개객걀걔거걱건걷걸검겁것겉게겨격겪견결겹경곁계고곡곤곧골곰곱곳공과관광괜괴굉교구국군굳굴굵굶굽궁권귀귓규균귤그극근글긁금급긋긍기긴길김깅깊까깍깎깐깔깜깝깡깥깨꺼꺾껌껍껏껑께껴꼬꼭꼴꼼꼽꽂꽃꽉꽤꾸꾼꿀꿈뀌끄끈끊끌끓끔끗끝끼낌나낙낚난날낡남납낫낭낮낯낱낳내냄냇냉냐냥너넉넌널넓넘넣네넥넷녀녁년념녕노녹논놀놈농높놓놔뇌뇨누눈눕뉘뉴늄느늑는늘늙능늦늬니닐님다닥닦단닫달닭닮담답닷당닿대댁댐댓더덕던덜덟덤덥덧덩덮데델도독돈돌돕돗동돼되된두둑둘둠둡둥뒤뒷드득든듣들듬듭듯등디딩딪따딱딴딸땀땅때땜떠떡떤떨떻떼또똑뚜뚫뚱뛰뜨뜩뜯뜰뜻띄라락란람랍랑랗래랜램랫략량러럭런럴럼럽럿렁렇레렉렌려력련렬렵령례로록론롬롭롯료루룩룹룻뤄류륙률륭르른름릇릎리릭린림립릿링마막만많말맑맘맙맛망맞맡맣매맥맨맵맺머먹먼멀멈멋멍멎메멘멩며면멸명몇모목몬몰몸몹못몽묘무묵묶문묻물뭄뭇뭐뭘뭣므미민믿밀밉밌및밑바박밖반받발밝밟밤밥방밭배백뱀뱃뱉버번벌범법벗베벤벨벼벽변별볍병볕보복볶본볼봄봇봉뵈뵙부북분불붉붐붓붕붙뷰브븐블비빌빔빗빚빛빠빡빨빵빼뺏뺨뻐뻔뻗뼈뼉뽑뿌뿐쁘쁨사삭산살삶삼삿상새색샌생샤서석섞선설섬섭섯성세섹센셈셋셔션소속손솔솜솟송솥쇄쇠쇼수숙순숟술숨숫숭숲쉬쉰쉽슈스슨슬슴습슷승시식신싣실싫심십싯싱싶싸싹싼쌀쌍쌓써썩썰썹쎄쏘쏟쑤쓰쓴쓸씀씌씨씩씬씹씻아악안앉않알앓암압앗앙앞애액앨야약얀얄얇양얕얗얘어억언얹얻얼엄업없엇엉엊엌엎에엔엘여역연열엷염엽엿영옆예옛오옥온올옮옳옷옹와완왕왜왠외왼요욕용우욱운울움웃웅워원월웨웬위윗유육율으윽은을음응의이익인일읽잃임입잇있잊잎자작잔잖잘잠잡잣장잦재쟁쟤저적전절젊점접젓정젖제젠젯져조족존졸좀좁종좋좌죄주죽준줄줌줍중쥐즈즉즌즐즘증지직진질짐집짓징짙짚짜짝짧째쨌쩌쩍쩐쩔쩜쪽쫓쭈쭉찌찍찢차착찬찮찰참찻창찾채책챔챙처척천철첩첫청체쳐초촉촌촛총촬최추축춘출춤춥춧충취츠측츰층치칙친칠침칫칭카칸칼캄캐캠커컨컬컴컵컷케켓켜코콘콜콤콩쾌쿄쿠퀴크큰클큼키킬타탁탄탈탑탓탕태택탤터턱턴털텅테텍텔템토톤톨톱통퇴투툴툼퉁튀튜트특튼튿틀틈티틱팀팅파팎판팔팝패팩팬퍼퍽페펜펴편펼평폐포폭폰표푸푹풀품풍퓨프플픔피픽필핏핑하학한할함합항해핵핸햄햇행향허헌험헤헬혀현혈협형혜호혹혼홀홈홉홍화확환활황회획횟횡효후훈훌훔훨휘휴흉흐흑흔흘흙흡흥흩희흰히힘',
help='character label')
parser_recog.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser_recog.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser_recog.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser_recog.add_argument('--input_channel', type=int, default=1,
help='the number of input channel of Feature extractor')
parser_recog.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser_recog.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
args_recog= parser_recog.parse_args()
return args_craft , args_recog
    def apply(self, image, timestamp, save_img=False):
        """Run OCR on *image* and return ``(pred, timestamp)``.

        Thin wrapper around ``detect_ocr``; intermediate-result logging is
        always disabled here, while image saving is caller-controlled via
        *save_img*.

        # NOTE(review): the shapes/semantics of `image`, `timestamp` and
        # `pred` are defined by detect_ocr, which is not visible here —
        # confirm against its implementation.
        """
        save_log = False
        pred, timestamp = detect_ocr(self, image, timestamp, save_img, save_log)
        return pred, timestamp
f728670877d053e632b4338aad097a6f4ee52456 | 31,108 | py | Python | cherrypy/lib/sessions.py | marlenebDC/cherrypy | b68087d0d62a8817e90a509a89afad0268052bc2 | [
"BSD-3-Clause"
] | null | null | null | cherrypy/lib/sessions.py | marlenebDC/cherrypy | b68087d0d62a8817e90a509a89afad0268052bc2 | [
"BSD-3-Clause"
] | null | null | null | cherrypy/lib/sessions.py | marlenebDC/cherrypy | b68087d0d62a8817e90a509a89afad0268052bc2 | [
"BSD-3-Clause"
] | 1 | 2019-11-22T16:17:17.000Z | 2019-11-22T16:17:17.000Z | """Session implementation for CherryPy.
You need to edit your config file to use sessions. Here's an example::
[/]
tools.sessions.on = True
tools.sessions.storage_class = cherrypy.lib.sessions.FileSession
tools.sessions.storage_path = "/home/site/sessions"
tools.sessions.timeout = 60
This sets the session to be stored in files in the directory
/home/site/sessions, and the session timeout to 60 minutes. If you omit
``storage_class``, the sessions will be saved in RAM.
``tools.sessions.on`` is the only required line for working sessions,
the rest are optional.
By default, the session ID is passed in a cookie, so the client's browser must
have cookies enabled for your site.
To set data for the current session, use
``cherrypy.session['fieldname'] = 'fieldvalue'``;
to get data use ``cherrypy.session.get('fieldname')``.
================
Locking sessions
================
By default, the ``'locking'`` mode of sessions is ``'implicit'``, which means
the session is locked early and unlocked late. Be mindful of this default mode
for any requests that take a long time to process (streaming responses,
expensive calculations, database lookups, API calls, etc), as other concurrent
requests that also utilize sessions will hang until the session is unlocked.
If you want to control when the session data is locked and unlocked,
set ``tools.sessions.locking = 'explicit'``. Then call
``cherrypy.session.acquire_lock()`` and ``cherrypy.session.release_lock()``.
Regardless of which mode you use, the session is guaranteed to be unlocked when
the request is complete.
=================
Expiring Sessions
=================
You can force a session to expire with :func:`cherrypy.lib.sessions.expire`.
Simply call that function at the point you want the session to expire, and it
will cause the session cookie to expire client-side.
===========================
Session Fixation Protection
===========================
If CherryPy receives, via a request cookie, a session id that it does not
recognize, it will reject that id and create a new one to return in the
response cookie. This `helps prevent session fixation attacks
<http://en.wikipedia.org/wiki/Session_fixation#Regenerate_SID_on_each_request>`_.
However, CherryPy "recognizes" a session id by looking up the saved session
data for that id. Therefore, if you never save any session data,
**you will get a new session id for every request**.
A side effect of CherryPy overwriting unrecognised session ids is that if you
have multiple, separate CherryPy applications running on a single domain (e.g.
on different ports), each app will overwrite the other's session id because by
default they use the same cookie name (``"session_id"``) but do not recognise
each others sessions. It is therefore a good idea to use a different name for
each, for example::
[/]
...
tools.sessions.name = "my_app_session_id"
================
Sharing Sessions
================
If you run multiple instances of CherryPy (for example via mod_python behind
Apache prefork), you most likely cannot use the RAM session backend, since each
instance of CherryPy will have its own memory space. Use a different backend
instead, and verify that all instances are pointing at the same file or db
location. Alternately, you might try a load balancer which makes sessions
"sticky". Google is your friend, there.
================
Expiration Dates
================
The response cookie will possess an expiration date to inform the client at
which point to stop sending the cookie back in requests. If the server time
and client time differ, expect sessions to be unreliable. **Make sure the
system time of your server is accurate**.
CherryPy defaults to a 60-minute session timeout, which also applies to the
cookie which is sent to the client. Unfortunately, some versions of Safari
("4 public beta" on Windows XP at least) appear to have a bug in their parsing
of the GMT expiration date--they appear to interpret the date as one hour in
the past. Sixty minutes minus one hour is pretty close to zero, so you may
experience this bug as a new session id for every request, unless the requests
are less than one second apart. To fix, try increasing the session.timeout.
On the other extreme, some users report Firefox sending cookies after their
expiration date, although this was on a system with an inaccurate system time.
Maybe FF doesn't trust system time.
"""
import sys
import datetime
import os
import time
import threading
import binascii
import pickle
import contextlib
import zc.lockfile
import cherrypy
from cherrypy.lib import httputil
from cherrypy.lib import locking
from cherrypy.lib import is_iterator
missing = object()
class Session(object):
    """A CherryPy dict-like Session object (one per request)."""

    _id = None

    id_observers = None
    "A list of callbacks to which to pass new id's."

    @property
    def id(self):
        """Return the current session id."""
        return self._id

    @id.setter
    def id(self, value):
        # Assigning a new id also notifies every registered observer
        # (e.g. the cookie updater attached by init()) so the response
        # cookie stays in sync with regenerated ids.
        self._id = value
        for o in self.id_observers:
            o(value)

    timeout = 60
    'Number of minutes after which to delete session data.'

    locked = False
    """
    If True, this session instance has exclusive read/write access
    to session data."""

    loaded = False
    """
    If True, data has been retrieved from storage. This should happen
    automatically on the first attempt to access session data."""

    clean_thread = None
    'Class-level Monitor which calls self.clean_up.'

    clean_freq = 5
    'The poll rate for expired session cleanup in minutes.'

    originalid = None
    'The session id passed by the client. May be missing or unsafe.'

    missing = False
    'True if the session requested by the client did not exist.'

    regenerated = False
    """
    True if the application called session.regenerate(). This is not set by
    internal calls to regenerate the session id."""

    debug = False
    'If True, log debug information.'

    # --------------------- Session management methods --------------------- #

    def __init__(self, id=None, **kwargs):
        """Bind the session to *id* (or generate one) and apply **kwargs
        as instance attributes (e.g. timeout, debug, storage options)."""
        self.id_observers = []
        self._data = {}

        for k, v in kwargs.items():
            setattr(self, k, v)

        self.originalid = id
        self.missing = False
        if id is None:
            if self.debug:
                cherrypy.log('No id given; making a new one', 'TOOLS.SESSIONS')
            self._regenerate()
        else:
            self.id = id
            if self._exists():
                if self.debug:
                    cherrypy.log('Set id to %s.' % id, 'TOOLS.SESSIONS')
            else:
                if self.debug:
                    cherrypy.log('Expired or malicious session %r; '
                                 'making a new one' % id, 'TOOLS.SESSIONS')
                # Expired or malicious session. Make a new one.
                # See https://github.com/cherrypy/cherrypy/issues/709.
                self.id = None
                self.missing = True
                self._regenerate()

    def now(self):
        """Generate the session specific concept of 'now'.

        Other session providers can override this to use alternative,
        possibly timezone aware, versions of 'now'.
        """
        return datetime.datetime.now()

    def regenerate(self):
        """Replace the current session (with a new id)."""
        self.regenerated = True
        self._regenerate()

    def _regenerate(self):
        # Discard any stored data for the old id, temporarily dropping the
        # lock (it is re-acquired for the new id below if it was held).
        if self.id is not None:
            if self.debug:
                cherrypy.log(
                    'Deleting the existing session %r before '
                    'regeneration.' % self.id,
                    'TOOLS.SESSIONS')
            self.delete()

        old_session_was_locked = self.locked
        if old_session_was_locked:
            self.release_lock()
            if self.debug:
                cherrypy.log('Old lock released.', 'TOOLS.SESSIONS')

        self.id = None
        # Loop until we draw an id that is not already in storage.
        while self.id is None:
            self.id = self.generate_id()
            # Assert that the generated id is not already stored.
            if self._exists():
                self.id = None
        if self.debug:
            cherrypy.log('Set id to generated %s.' % self.id,
                         'TOOLS.SESSIONS')

        if old_session_was_locked:
            self.acquire_lock()
            if self.debug:
                cherrypy.log('Regenerated lock acquired.', 'TOOLS.SESSIONS')

    def clean_up(self):
        """Clean up expired sessions."""
        # No-op here; storage subclasses override this.
        pass

    def generate_id(self):
        """Return a new session id."""
        # 20 random bytes -> 40 hex characters.
        return binascii.hexlify(os.urandom(20)).decode('ascii')

    def save(self):
        """Save session data."""
        try:
            # If session data has never been loaded then it's never been
            #  accessed: no need to save it
            if self.loaded:
                t = datetime.timedelta(seconds=self.timeout * 60)
                expiration_time = self.now() + t
                if self.debug:
                    cherrypy.log('Saving session %r with expiry %s' %
                                 (self.id, expiration_time),
                                 'TOOLS.SESSIONS')
                self._save(expiration_time)
            else:
                if self.debug:
                    cherrypy.log(
                        'Skipping save of session %r (no session loaded).' %
                        self.id, 'TOOLS.SESSIONS')
        finally:
            if self.locked:
                # Always release the lock if the user didn't release it
                self.release_lock()
                if self.debug:
                    cherrypy.log('Lock released after save.', 'TOOLS.SESSIONS')

    def load(self):
        """Copy stored session data into this session instance."""
        data = self._load()
        # data is either None or a tuple (session_data, expiration_time)
        if data is None or data[1] < self.now():
            if self.debug:
                cherrypy.log('Expired session %r, flushing data.' % self.id,
                             'TOOLS.SESSIONS')
            self._data = {}
        else:
            if self.debug:
                cherrypy.log('Data loaded for session %r.' % self.id,
                             'TOOLS.SESSIONS')
            self._data = data[0]
        self.loaded = True

        # Stick the clean_thread in the class, not the instance.
        # The instances are created and destroyed per-request.
        cls = self.__class__
        if self.clean_freq and not cls.clean_thread:
            # clean_up is an instancemethod and not a classmethod,
            # so that tool config can be accessed inside the method.
            t = cherrypy.process.plugins.Monitor(
                cherrypy.engine, self.clean_up, self.clean_freq * 60,
                name='Session cleanup')
            t.subscribe()
            cls.clean_thread = t
            t.start()
            if self.debug:
                cherrypy.log('Started cleanup thread.', 'TOOLS.SESSIONS')

    def delete(self):
        """Delete stored session data."""
        self._delete()
        if self.debug:
            cherrypy.log('Deleted session %s.' % self.id,
                         'TOOLS.SESSIONS')

    # -------------------- Application accessor methods -------------------- #
    # Each accessor lazily loads the stored data on first access.

    def __getitem__(self, key):
        if not self.loaded:
            self.load()
        return self._data[key]

    def __setitem__(self, key, value):
        if not self.loaded:
            self.load()
        self._data[key] = value

    def __delitem__(self, key):
        if not self.loaded:
            self.load()
        del self._data[key]

    def pop(self, key, default=missing):
        """Remove the specified key and return the corresponding value.

        If key is not found, default is returned if given,
        otherwise KeyError is raised.
        """
        if not self.loaded:
            self.load()
        if default is missing:
            # No default supplied: let dict.pop raise KeyError.
            return self._data.pop(key)
        else:
            return self._data.pop(key, default)

    def __contains__(self, key):
        if not self.loaded:
            self.load()
        return key in self._data

    def get(self, key, default=None):
        """D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
        if not self.loaded:
            self.load()
        return self._data.get(key, default)

    def update(self, d):
        """D.update(E) -> None. Update D from E: for k in E: D[k] = E[k]."""
        if not self.loaded:
            self.load()
        self._data.update(d)

    def setdefault(self, key, default=None):
        """D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D."""
        if not self.loaded:
            self.load()
        return self._data.setdefault(key, default)

    def clear(self):
        """D.clear() -> None. Remove all items from D."""
        if not self.loaded:
            self.load()
        self._data.clear()

    def keys(self):
        """D.keys() -> list of D's keys."""
        if not self.loaded:
            self.load()
        return self._data.keys()

    def items(self):
        """D.items() -> list of D's (key, value) pairs, as 2-tuples."""
        if not self.loaded:
            self.load()
        return self._data.items()

    def values(self):
        """D.values() -> list of D's values."""
        if not self.loaded:
            self.load()
        return self._data.values()
class RamSession(Session):
    """In-process session storage; data lives in class-level dicts and is
    therefore NOT shared between processes."""

    # Class-level objects. Don't rebind these!
    cache = {}
    locks = {}

    def clean_up(self):
        """Clean up expired sessions."""
        now = self.now()
        # Iterate over a copy so concurrent request threads may mutate
        # self.cache while we scan it.
        for _id, (data, expiration_time) in self.cache.copy().items():
            if expiration_time <= now:
                try:
                    del self.cache[_id]
                except KeyError:
                    pass
                try:
                    # Only drop the lock if nobody currently holds it.
                    if self.locks[_id].acquire(blocking=False):
                        lock = self.locks.pop(_id)
                        lock.release()
                except KeyError:
                    pass

        # added to remove obsolete lock objects
        for _id in list(self.locks):
            locked = (
                _id not in self.cache
                and self.locks[_id].acquire(blocking=False)
            )
            if locked:
                lock = self.locks.pop(_id)
                lock.release()

    def _exists(self):
        # True if data for self.id is currently stored.
        return self.id in self.cache

    def _load(self):
        # Returns (data, expiration_time) or None.
        return self.cache.get(self.id)

    def _save(self, expiration_time):
        self.cache[self.id] = (self._data, expiration_time)

    def _delete(self):
        self.cache.pop(self.id, None)

    def acquire_lock(self):
        """Acquire an exclusive lock on the currently-loaded session data."""
        self.locked = True
        self.locks.setdefault(self.id, threading.RLock()).acquire()

    def release_lock(self):
        """Release the lock on the currently-loaded session data."""
        self.locks[self.id].release()
        self.locked = False

    def __len__(self):
        """Return the number of active sessions."""
        return len(self.cache)
class FileSession(Session):
    """Implementation of the File backend for sessions

    storage_path
        The folder where session data will be saved. Each session
        will be saved as pickle.dump(data, expiration_time) in its own file;
        the filename will be self.SESSION_PREFIX + self.id.

    lock_timeout
        A timedelta or numeric seconds indicating how long
        to block acquiring a lock. If None (default), acquiring a lock
        will block indefinitely.
    """

    SESSION_PREFIX = 'session-'
    LOCK_SUFFIX = '.lock'
    pickle_protocol = pickle.HIGHEST_PROTOCOL

    def __init__(self, id=None, **kwargs):
        # The 'storage_path' arg is required for file-based sessions.
        kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
        kwargs.setdefault('lock_timeout', None)

        Session.__init__(self, id=id, **kwargs)

        # validate self.lock_timeout (numeric seconds are normalized
        # to a timedelta)
        if isinstance(self.lock_timeout, (int, float)):
            self.lock_timeout = datetime.timedelta(seconds=self.lock_timeout)
        if not isinstance(self.lock_timeout, (datetime.timedelta, type(None))):
            raise ValueError(
                'Lock timeout must be numeric seconds or a timedelta instance.'
            )

    @classmethod
    def setup(cls, **kwargs):
        """Set up the storage system for file-based sessions.

        This should only be called once per process; this will be done
        automatically when using sessions.init (as the built-in Tool does).
        """
        # The 'storage_path' arg is required for file-based sessions.
        kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])

        for k, v in kwargs.items():
            setattr(cls, k, v)

    def _get_file_path(self):
        # Reject ids whose normalized path would escape storage_path.
        # NOTE(review): the startswith() containment check has the classic
        # sibling-prefix weakness (e.g. '/a/b' vs '/a/bc'); the SESSION_PREFIX
        # on the filename mitigates it, but confirm before relying on it.
        f = os.path.join(self.storage_path, self.SESSION_PREFIX + self.id)
        if not os.path.abspath(f).startswith(self.storage_path):
            raise cherrypy.HTTPError(400, 'Invalid session id in cookie.')
        return f

    def _exists(self):
        path = self._get_file_path()
        return os.path.exists(path)

    def _load(self, path=None):
        assert self.locked, ('The session load without being locked. '
                             "Check your tools' priority levels.")
        if path is None:
            path = self._get_file_path()
        try:
            f = open(path, 'rb')
            try:
                return pickle.load(f)
            finally:
                f.close()
        except (IOError, EOFError):
            # Missing or truncated pickle: treat as no stored session.
            e = sys.exc_info()[1]
            if self.debug:
                cherrypy.log('Error loading the session pickle: %s' %
                             e, 'TOOLS.SESSIONS')
            return None

    def _save(self, expiration_time):
        assert self.locked, ('The session was saved without being locked. '
                             "Check your tools' priority levels.")
        f = open(self._get_file_path(), 'wb')
        try:
            pickle.dump((self._data, expiration_time), f, self.pickle_protocol)
        finally:
            f.close()

    def _delete(self):
        assert self.locked, ('The session deletion without being locked. '
                             "Check your tools' priority levels.")
        try:
            os.unlink(self._get_file_path())
        except OSError:
            pass

    def acquire_lock(self, path=None):
        """Acquire an exclusive lock on the currently-loaded session data."""
        if path is None:
            path = self._get_file_path()
        path += self.LOCK_SUFFIX
        # Poll for the zc.lockfile lock until acquired or lock_timeout
        # elapses (None means wait forever).
        checker = locking.LockChecker(self.id, self.lock_timeout)
        while not checker.expired():
            try:
                self.lock = zc.lockfile.LockFile(path)
            except zc.lockfile.LockError:
                time.sleep(0.1)
            else:
                break
        self.locked = True
        if self.debug:
            cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')

    def release_lock(self, path=None):
        """Release the lock on the currently-loaded session data."""
        self.lock.close()
        # The lock file may already be gone (e.g. removed by cleanup).
        with contextlib.suppress(FileNotFoundError):
            os.remove(self.lock._path)
        self.locked = False

    def clean_up(self):
        """Clean up expired sessions."""
        now = self.now()
        # Iterate over all session files in self.storage_path
        for fname in os.listdir(self.storage_path):
            have_session = (
                fname.startswith(self.SESSION_PREFIX)
                and not fname.endswith(self.LOCK_SUFFIX)
            )
            if have_session:
                # We have a session file: lock and load it and check
                #   if it's expired. If it fails, never mind.
                path = os.path.join(self.storage_path, fname)
                self.acquire_lock(path)
                if self.debug:
                    # This is a bit of a hack, since we're calling clean_up
                    # on the first instance rather than the entire class,
                    # so depending on whether you have "debug" set on the
                    # path of the first session called, this may not run.
                    cherrypy.log('Cleanup lock acquired.', 'TOOLS.SESSIONS')

                try:
                    contents = self._load(path)
                    # _load returns None on IOError
                    if contents is not None:
                        data, expiration_time = contents
                        if expiration_time < now:
                            # Session expired: deleting it
                            os.unlink(path)
                finally:
                    self.release_lock(path)

    def __len__(self):
        """Return the number of active sessions."""
        return len([fname for fname in os.listdir(self.storage_path)
                    if (fname.startswith(self.SESSION_PREFIX) and
                        not fname.endswith(self.LOCK_SUFFIX))])
class MemcachedSession(Session):
    """Memcached-backed session storage, usable across processes sharing
    the same memcached servers."""

    # The most popular memcached client for Python isn't thread-safe.
    # Wrap all .get and .set operations in a single lock.
    mc_lock = threading.RLock()

    # This is a separate set of locks per session id.
    locks = {}

    servers = ['localhost:11211']

    @classmethod
    def setup(cls, **kwargs):
        """Set up the storage system for memcached-based sessions.

        This should only be called once per process; this will be done
        automatically when using sessions.init (as the built-in Tool does).
        """
        for k, v in kwargs.items():
            setattr(cls, k, v)

        import memcache
        cls.cache = memcache.Client(cls.servers)

    def _exists(self):
        # True if the backend currently holds data for self.id.
        self.mc_lock.acquire()
        try:
            return bool(self.cache.get(self.id))
        finally:
            self.mc_lock.release()

    def _load(self):
        # Returns (data, expiration_time) as stored by _save(), or None.
        self.mc_lock.acquire()
        try:
            return self.cache.get(self.id)
        finally:
            self.mc_lock.release()

    def _save(self, expiration_time):
        # Send the expiration time as "Unix time" (seconds since 1/1/1970)
        td = int(time.mktime(expiration_time.timetuple()))
        self.mc_lock.acquire()
        try:
            if not self.cache.set(self.id, (self._data, expiration_time), td):
                raise AssertionError(
                    'Session data for id %r not set.' % self.id)
        finally:
            self.mc_lock.release()

    def _delete(self):
        self.cache.delete(self.id)

    def acquire_lock(self):
        """Acquire an exclusive lock on the currently-loaded session data."""
        # NOTE: self.locks holds threading.RLock objects, so this guards
        # only against threads in this process, not other processes
        # sharing the same memcached backend.
        self.locked = True
        self.locks.setdefault(self.id, threading.RLock()).acquire()
        if self.debug:
            cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')

    def release_lock(self):
        """Release the lock on the currently-loaded session data."""
        self.locks[self.id].release()
        self.locked = False

    def __len__(self):
        """Return the number of active sessions."""
        # memcache offers no way to enumerate its keys.
        raise NotImplementedError
# Hook functions (for CherryPy tools)
def save():
    """Save any changed session data.

    Tool hook: no-op unless a session is attached to cherrypy.serving.
    For streamed responses the save is deferred to 'on_end_request';
    otherwise the body is collapsed (if needed) and saved immediately so
    the session lock can be released.
    """
    serving = cherrypy.serving
    if not hasattr(serving, 'session'):
        return

    request = serving.request
    response = serving.response

    # Run at most once per request.
    if hasattr(request, '_sessionsaved'):
        return
    request._sessionsaved = True

    if response.stream:
        # Streamed body: the data must be saved only *after* the whole
        # response has been written out.
        request.hooks.attach('on_end_request', cherrypy.session.save)
        return

    # Non-streamed body: persist now (releasing the lock early). An
    # iterator body must be collapsed first so it is not consumed twice.
    if is_iterator(response.body):
        response.collapse_body()
    cherrypy.session.save()


save.failsafe = True
def close():
    """Close the session object for this request."""
    session = getattr(cherrypy.serving, 'session', None)
    still_locked = getattr(session, 'locked', False)
    if not still_locked:
        return
    # The handler never released the lock itself; do it now.
    session.release_lock()
    if session.debug:
        cherrypy.log('Lock released on close.', 'TOOLS.SESSIONS')


close.failsafe = True
close.priority = 90
def init(storage_type=None, path=None, path_header=None, name='session_id',
         timeout=60, domain=None, secure=False, clean_freq=5,
         persistent=True, httponly=False, debug=False,
         # Py27 compat
         # *, storage_class=RamSession,
         **kwargs):
    """Initialize session object (using cookies).

    storage_class
        The Session subclass to use. Defaults to RamSession.

    storage_type
        (deprecated)
        One of 'ram', 'file', 'memcached'. This will be
        used to look up the corresponding class in cherrypy.lib.sessions
        globals. For example, 'file' will use the FileSession class.

    path
        The 'path' value to stick in the response cookie metadata.

    path_header
        If 'path' is None (the default), then the response
        cookie 'path' will be pulled from request.headers[path_header].

    name
        The name of the cookie.

    timeout
        The expiration timeout (in minutes) for the stored session data.
        If 'persistent' is True (the default), this is also the timeout
        for the cookie.

    domain
        The cookie domain.

    secure
        If False (the default) the cookie 'secure' value will not
        be set. If True, the cookie 'secure' value will be set (to 1).

    clean_freq (minutes)
        The poll rate for expired session cleanup.

    persistent
        If True (the default), the 'timeout' argument will be used
        to expire the cookie. If False, the cookie will not have an expiry,
        and the cookie will be a "session cookie" which expires when the
        browser is closed.

    httponly
        If False (the default) the cookie 'httponly' value will not be set.
        If True, the cookie 'httponly' value will be set (to 1).

    Any additional kwargs will be bound to the new Session instance,
    and may be specific to the storage type. See the subclass of Session
    you're using for more information.
    """

    # Py27 compat: storage_class cannot be a keyword-only arg on 2.7,
    # so it is pulled from **kwargs instead.
    storage_class = kwargs.pop('storage_class', RamSession)

    request = cherrypy.serving.request

    # Guard against running twice
    if hasattr(request, '_session_init_flag'):
        return
    request._session_init_flag = True

    # Check if request came with a session ID
    id = None
    if name in request.cookie:
        id = request.cookie[name].value
        if debug:
            cherrypy.log('ID obtained from request.cookie: %r' % id,
                         'TOOLS.SESSIONS')

    first_time = not hasattr(cherrypy, 'session')

    if storage_type:
        if first_time:
            msg = 'storage_type is deprecated. Supply storage_class instead'
            cherrypy.log(msg)
        # e.g. 'file' -> FileSession, looked up in this module's globals.
        storage_class = storage_type.title() + 'Session'
        storage_class = globals()[storage_class]

    # call setup first time only
    if first_time:
        if hasattr(storage_class, 'setup'):
            storage_class.setup(**kwargs)

    # Create and attach a new Session instance to cherrypy.serving.
    # It will possess a reference to (and lock, and lazily load)
    # the requested session data.
    kwargs['timeout'] = timeout
    kwargs['clean_freq'] = clean_freq
    cherrypy.serving.session = sess = storage_class(id, **kwargs)
    sess.debug = debug

    def update_cookie(id):
        """Update the cookie every time the session id changes."""
        cherrypy.serving.response.cookie[name] = id
    sess.id_observers.append(update_cookie)

    # Create cherrypy.session which will proxy to cherrypy.serving.session
    if not hasattr(cherrypy, 'session'):
        cherrypy.session = cherrypy._ThreadLocalProxy('session')

    if persistent:
        cookie_timeout = timeout
    else:
        # See http://support.microsoft.com/kb/223799/EN-US/
        # and http://support.mozilla.com/en-US/kb/Cookies
        cookie_timeout = None
    set_response_cookie(path=path, path_header=path_header, name=name,
                        timeout=cookie_timeout, domain=domain, secure=secure,
                        httponly=httponly)
def set_response_cookie(path=None, path_header=None, name='session_id',
                        timeout=60, domain=None, secure=False, httponly=False):
    """Set a response cookie for the client.

    path
        the 'path' value to stick in the response cookie metadata.

    path_header
        if 'path' is None (the default), then the response
        cookie 'path' will be pulled from request.headers[path_header].

    name
        the name of the cookie.

    timeout
        the expiration timeout for the cookie. If 0 or other boolean
        False, no 'expires' param will be set, and the cookie will be a
        "session cookie" which expires when the browser is closed.

    domain
        the cookie domain.

    secure
        if False (the default) the cookie 'secure' value will not
        be set. If True, the cookie 'secure' value will be set (to 1).

    httponly
        If False (the default) the cookie 'httponly' value will not be set.
        If True, the cookie 'httponly' value will be set (to 1).
    """
    # Set response cookie
    cookie = cherrypy.serving.response.cookie
    cookie[name] = cherrypy.serving.session.id
    morsel = cookie[name]

    # Fall back lazily: explicit path, then the named request header,
    # then '/'.
    morsel['path'] = (
        path
        or cherrypy.serving.request.headers.get(path_header)
        or '/'
    )

    if timeout:
        morsel['max-age'] = timeout * 60
        _add_MSIE_max_age_workaround(morsel, timeout)
    if domain is not None:
        morsel['domain'] = domain
    if secure:
        morsel['secure'] = 1
    if httponly:
        if not morsel.isReservedKey('httponly'):
            raise ValueError('The httponly cookie token is not supported.')
        morsel['httponly'] = 1
def _add_MSIE_max_age_workaround(cookie, timeout):
    """Set a legacy ``expires`` attribute alongside ``max-age``.

    "max-age" (RFC 2109, http://www.faqs.org/rfcs/rfc2109.html) would be
    sufficient on its own, but IE does not save it to disk, losing the
    session when the browser closes — so the old "expires" attribute is
    emitted as well.
    """
    seconds = timeout * 60
    cookie['expires'] = httputil.HTTPDate(time.time() + seconds)
def expire():
    """Expire the current session cookie."""
    cookie_name = cherrypy.serving.request.config.get(
        'tools.sessions.name', 'session_id')
    morsel = cherrypy.serving.response.cookie[cookie_name]
    # Date the cookie one year into the past so the client discards it,
    # and drop any conflicting max-age.
    one_year = 60 * 60 * 24 * 365
    morsel['expires'] = httputil.HTTPDate(time.time() - one_year)
    morsel.pop('max-age', None)
| 34.035011 | 81 | 0.604764 | import sys
import datetime
import os
import time
import threading
import binascii
import pickle
import contextlib
import zc.lockfile
import cherrypy
from cherrypy.lib import httputil
from cherrypy.lib import locking
from cherrypy.lib import is_iterator
missing = object()
class Session(object):
_id = None
id_observers = None
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
for o in self.id_observers:
o(value)
timeout = 60
locked = False
loaded = False
clean_thread = None
clean_freq = 5
originalid = None
missing = False
regenerated = False
debug = False
def __init__(self, id=None, **kwargs):
self.id_observers = []
self._data = {}
for k, v in kwargs.items():
setattr(self, k, v)
self.originalid = id
self.missing = False
if id is None:
if self.debug:
cherrypy.log('No id given; making a new one', 'TOOLS.SESSIONS')
self._regenerate()
else:
self.id = id
if self._exists():
if self.debug:
cherrypy.log('Set id to %s.' % id, 'TOOLS.SESSIONS')
else:
if self.debug:
cherrypy.log('Expired or malicious session %r; '
'making a new one' % id, 'TOOLS.SESSIONS')
self.id = None
self.missing = True
self._regenerate()
def now(self):
return datetime.datetime.now()
def regenerate(self):
self.regenerated = True
self._regenerate()
def _regenerate(self):
if self.id is not None:
if self.debug:
cherrypy.log(
'Deleting the existing session %r before '
'regeneration.' % self.id,
'TOOLS.SESSIONS')
self.delete()
old_session_was_locked = self.locked
if old_session_was_locked:
self.release_lock()
if self.debug:
cherrypy.log('Old lock released.', 'TOOLS.SESSIONS')
self.id = None
while self.id is None:
self.id = self.generate_id()
if self._exists():
self.id = None
if self.debug:
cherrypy.log('Set id to generated %s.' % self.id,
'TOOLS.SESSIONS')
if old_session_was_locked:
self.acquire_lock()
if self.debug:
cherrypy.log('Regenerated lock acquired.', 'TOOLS.SESSIONS')
def clean_up(self):
pass
def generate_id(self):
return binascii.hexlify(os.urandom(20)).decode('ascii')
def save(self):
try:
# accessed: no need to save it
if self.loaded:
t = datetime.timedelta(seconds=self.timeout * 60)
expiration_time = self.now() + t
if self.debug:
cherrypy.log('Saving session %r with expiry %s' %
(self.id, expiration_time),
'TOOLS.SESSIONS')
self._save(expiration_time)
else:
if self.debug:
cherrypy.log(
'Skipping save of session %r (no session loaded).' %
self.id, 'TOOLS.SESSIONS')
finally:
if self.locked:
# Always release the lock if the user didn't release it
self.release_lock()
if self.debug:
cherrypy.log('Lock released after save.', 'TOOLS.SESSIONS')
def load(self):
data = self._load()
if data is None or data[1] < self.now():
if self.debug:
cherrypy.log('Expired session %r, flushing data.' % self.id,
'TOOLS.SESSIONS')
self._data = {}
else:
if self.debug:
cherrypy.log('Data loaded for session %r.' % self.id,
'TOOLS.SESSIONS')
self._data = data[0]
self.loaded = True
cls = self.__class__
if self.clean_freq and not cls.clean_thread:
t = cherrypy.process.plugins.Monitor(
cherrypy.engine, self.clean_up, self.clean_freq * 60,
name='Session cleanup')
t.subscribe()
cls.clean_thread = t
t.start()
if self.debug:
cherrypy.log('Started cleanup thread.', 'TOOLS.SESSIONS')
def delete(self):
self._delete()
if self.debug:
cherrypy.log('Deleted session %s.' % self.id,
'TOOLS.SESSIONS')
def __getitem__(self, key):
if not self.loaded:
self.load()
return self._data[key]
def __setitem__(self, key, value):
if not self.loaded:
self.load()
self._data[key] = value
def __delitem__(self, key):
if not self.loaded:
self.load()
del self._data[key]
def pop(self, key, default=missing):
if not self.loaded:
self.load()
if default is missing:
return self._data.pop(key)
else:
return self._data.pop(key, default)
def __contains__(self, key):
if not self.loaded:
self.load()
return key in self._data
def get(self, key, default=None):
if not self.loaded:
self.load()
return self._data.get(key, default)
def update(self, d):
if not self.loaded:
self.load()
self._data.update(d)
def setdefault(self, key, default=None):
if not self.loaded:
self.load()
return self._data.setdefault(key, default)
def clear(self):
if not self.loaded:
self.load()
self._data.clear()
def keys(self):
if not self.loaded:
self.load()
return self._data.keys()
def items(self):
if not self.loaded:
self.load()
return self._data.items()
def values(self):
if not self.loaded:
self.load()
return self._data.values()
class RamSession(Session):
cache = {}
locks = {}
def clean_up(self):
now = self.now()
for _id, (data, expiration_time) in self.cache.copy().items():
if expiration_time <= now:
try:
del self.cache[_id]
except KeyError:
pass
try:
if self.locks[_id].acquire(blocking=False):
lock = self.locks.pop(_id)
lock.release()
except KeyError:
pass
# added to remove obsolete lock objects
for _id in list(self.locks):
locked = (
_id not in self.cache
and self.locks[_id].acquire(blocking=False)
)
if locked:
lock = self.locks.pop(_id)
lock.release()
def _exists(self):
return self.id in self.cache
def _load(self):
return self.cache.get(self.id)
def _save(self, expiration_time):
self.cache[self.id] = (self._data, expiration_time)
def _delete(self):
self.cache.pop(self.id, None)
def acquire_lock(self):
self.locked = True
self.locks.setdefault(self.id, threading.RLock()).acquire()
def release_lock(self):
self.locks[self.id].release()
self.locked = False
def __len__(self):
return len(self.cache)
class FileSession(Session):
    """Session implementation that stores each session pickled in its own file.

    Files live under ``storage_path`` and are named
    ``SESSION_PREFIX + session_id``; a companion ``.lock`` file (via
    zc.lockfile) provides cross-process locking.
    """

    SESSION_PREFIX = 'session-'
    LOCK_SUFFIX = '.lock'
    pickle_protocol = pickle.HIGHEST_PROTOCOL
    def __init__(self, id=None, **kwargs):
        # The 'storage_path' arg is required for file-based sessions.
        kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
        kwargs.setdefault('lock_timeout', None)
        Session.__init__(self, id=id, **kwargs)
        # validate self.lock_timeout
        if isinstance(self.lock_timeout, (int, float)):
            self.lock_timeout = datetime.timedelta(seconds=self.lock_timeout)
        if not isinstance(self.lock_timeout, (datetime.timedelta, type(None))):
            raise ValueError(
                'Lock timeout must be numeric seconds or a timedelta instance.'
            )
    @classmethod
    def setup(cls, **kwargs):
        """Apply configuration entries as class attributes (called once)."""
        # The 'storage_path' arg is required for file-based sessions.
        kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
        for k, v in kwargs.items():
            setattr(cls, k, v)
    def _get_file_path(self):
        """Return the absolute file path for this session id.

        Raises HTTPError(400) if the id would escape storage_path
        (path-traversal guard).
        """
        f = os.path.join(self.storage_path, self.SESSION_PREFIX + self.id)
        if not os.path.abspath(f).startswith(self.storage_path):
            raise cherrypy.HTTPError(400, 'Invalid session id in cookie.')
        return f
    def _exists(self):
        """Return True if a session file exists for this id."""
        path = self._get_file_path()
        return os.path.exists(path)
    def _load(self, path=None):
        """Unpickle and return (data, expiration_time) or None on read error."""
        assert self.locked, ('The session load without being locked. '
                             "Check your tools' priority levels.")
        if path is None:
            path = self._get_file_path()
        try:
            f = open(path, 'rb')
            try:
                return pickle.load(f)
            finally:
                f.close()
        except (IOError, EOFError):
            # Treat unreadable/truncated session files as "no session".
            e = sys.exc_info()[1]
            if self.debug:
                cherrypy.log('Error loading the session pickle: %s' %
                             e, 'TOOLS.SESSIONS')
            return None
    def _save(self, expiration_time):
        """Pickle (data, expiration_time) to this session's file."""
        assert self.locked, ('The session was saved without being locked. '
                             "Check your tools' priority levels.")
        f = open(self._get_file_path(), 'wb')
        try:
            pickle.dump((self._data, expiration_time), f, self.pickle_protocol)
        finally:
            f.close()
    def _delete(self):
        """Delete this session's file, ignoring a missing file."""
        assert self.locked, ('The session deletion without being locked. '
                             "Check your tools' priority levels.")
        try:
            os.unlink(self._get_file_path())
        except OSError:
            pass
    def acquire_lock(self, path=None):
        """Acquire the file lock, retrying every 0.1s until acquired.

        NOTE(review): locking.LockChecker presumably raises on timeout
        (otherwise self.lock could remain unset here) — confirm against
        the locking module.
        """
        if path is None:
            path = self._get_file_path()
        path += self.LOCK_SUFFIX
        checker = locking.LockChecker(self.id, self.lock_timeout)
        while not checker.expired():
            try:
                self.lock = zc.lockfile.LockFile(path)
            except zc.lockfile.LockError:
                time.sleep(0.1)
            else:
                break
        self.locked = True
        if self.debug:
            cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')
    def release_lock(self, path=None):
        """Close the file lock and remove the lock file if still present."""
        self.lock.close()
        with contextlib.suppress(FileNotFoundError):
            os.remove(self.lock._path)
        self.locked = False
    def clean_up(self):
        """Delete all expired session files, locking each one while checked."""
        now = self.now()
        for fname in os.listdir(self.storage_path):
            have_session = (
                fname.startswith(self.SESSION_PREFIX)
                and not fname.endswith(self.LOCK_SUFFIX)
            )
            if have_session:
                path = os.path.join(self.storage_path, fname)
                self.acquire_lock(path)
                if self.debug:
                    # This is a bit of a hack, since we're calling clean_up
                    cherrypy.log('Cleanup lock acquired.', 'TOOLS.SESSIONS')
                try:
                    contents = self._load(path)
                    if contents is not None:
                        data, expiration_time = contents
                        if expiration_time < now:
                            os.unlink(path)
                finally:
                    self.release_lock(path)
    def __len__(self):
        """Return the number of session files on disk (lock files excluded)."""
        return len([fname for fname in os.listdir(self.storage_path)
                    if (fname.startswith(self.SESSION_PREFIX) and
                        not fname.endswith(self.LOCK_SUFFIX))])
class MemcachedSession(Session):
    """Session store backed by a memcached cluster."""

    # Wrap all .get and .set operations in a single lock.
    mc_lock = threading.RLock()
    # This is a separate set of locks per session id.
    locks = {}
    servers = ['localhost:11211']

    @classmethod
    def setup(cls, **kwargs):
        """Apply config kwargs as class attributes and connect the client."""
        for key, value in kwargs.items():
            setattr(cls, key, value)
        import memcache
        cls.cache = memcache.Client(cls.servers)

    def _exists(self):
        """Return True if memcached holds data for this session id."""
        with self.mc_lock:
            return bool(self.cache.get(self.id))

    def _load(self):
        """Return the stored (data, expiration_time) pair, or None."""
        with self.mc_lock:
            return self.cache.get(self.id)

    def _save(self, expiration_time):
        """Store the session, expiring at the given datetime."""
        # Send the expiration time as "Unix time" (seconds since 1/1/1970)
        unix_expiry = int(time.mktime(expiration_time.timetuple()))
        with self.mc_lock:
            stored = self.cache.set(
                self.id, (self._data, expiration_time), unix_expiry)
            if not stored:
                raise AssertionError(
                    'Session data for id %r not set.' % self.id)

    def _delete(self):
        """Remove this session's entry from memcached."""
        self.cache.delete(self.id)

    def acquire_lock(self):
        """Acquire the per-session-id lock, creating it on first use."""
        self.locked = True
        self.locks.setdefault(self.id, threading.RLock()).acquire()
        if self.debug:
            cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')

    def release_lock(self):
        """Release the per-session-id lock."""
        self.locks[self.id].release()
        self.locked = False

    def __len__(self):
        # memcached offers no cheap way to count stored sessions.
        raise NotImplementedError
# Hook functions (for CherryPy tools)
def save():
    """Save the session now, or at request end for streamed responses."""
    serving = cherrypy.serving
    if not hasattr(serving, 'session'):
        return
    request = serving.request
    response = serving.response
    # Guard against running twice
    if hasattr(request, '_sessionsaved'):
        return
    request._sessionsaved = True
    if response.stream:
        # Streamed body: the data can only be saved *after* the
        # response has been written out.
        request.hooks.attach('on_end_request', cherrypy.session.save)
        return
    # Non-streamed body: save now so the session lock can be released.
    if is_iterator(response.body):
        response.collapse_body()
    cherrypy.session.save()
save.failsafe = True
def close():
    """Release the session lock, if one is still held, at request close."""
    sess = getattr(cherrypy.serving, 'session', None)
    if not getattr(sess, 'locked', False):
        return
    # The session is still locked; release the lock now.
    sess.release_lock()
    if sess.debug:
        cherrypy.log('Lock released on close.', 'TOOLS.SESSIONS')
close.failsafe = True
close.priority = 90
def init(storage_type=None, path=None, path_header=None, name='session_id',
         timeout=60, domain=None, secure=False, clean_freq=5,
         persistent=True, httponly=False, debug=False,
         # Py27 compat
         # *, storage_class=RamSession,
         **kwargs):
    """Initialize session handling for the current request.

    name: the cookie name carrying the session id.
    timeout: session lifetime in minutes.
    clean_freq: minutes between clean-up runs.
    persistent: if False, no cookie expiry is set so the browser drops the
        cookie when closed.
    storage_type: deprecated string ('ram', 'file', ...); use the
        storage_class kwarg instead.
    Remaining kwargs are passed to the storage class.
    """
    # Py27 compat
    storage_class = kwargs.pop('storage_class', RamSession)
    request = cherrypy.serving.request
    # Guard against running twice
    if hasattr(request, '_session_init_flag'):
        return
    request._session_init_flag = True
    # Check if request came with a session ID
    id = None
    if name in request.cookie:
        id = request.cookie[name].value
        if debug:
            cherrypy.log('ID obtained from request.cookie: %r' % id,
                         'TOOLS.SESSIONS')
    first_time = not hasattr(cherrypy, 'session')
    if storage_type:
        if first_time:
            msg = 'storage_type is deprecated. Supply storage_class instead'
            cherrypy.log(msg)
        # Map e.g. 'ram' -> RamSession via this module's globals.
        storage_class = storage_type.title() + 'Session'
        storage_class = globals()[storage_class]
    # call setup first time only
    if first_time:
        if hasattr(storage_class, 'setup'):
            storage_class.setup(**kwargs)
    # Create and attach a new Session instance to cherrypy.serving.
    # It will possess a reference to (and lock, and lazily load)
    # the requested session data.
    kwargs['timeout'] = timeout
    kwargs['clean_freq'] = clean_freq
    cherrypy.serving.session = sess = storage_class(id, **kwargs)
    sess.debug = debug
    def update_cookie(id):
        # Keep the response cookie in sync when the session id changes.
        cherrypy.serving.response.cookie[name] = id
    sess.id_observers.append(update_cookie)
    # Create cherrypy.session which will proxy to cherrypy.serving.session
    if not hasattr(cherrypy, 'session'):
        cherrypy.session = cherrypy._ThreadLocalProxy('session')
    if persistent:
        cookie_timeout = timeout
    else:
        # See http://support.microsoft.com/kb/223799/EN-US/
        # and http://support.mozilla.com/en-US/kb/Cookies
        cookie_timeout = None
    set_response_cookie(path=path, path_header=path_header, name=name,
                        timeout=cookie_timeout, domain=domain, secure=secure,
                        httponly=httponly)
def set_response_cookie(path=None, path_header=None, name='session_id',
                        timeout=60, domain=None, secure=False, httponly=False):
    """Set the session-id cookie on the current response.

    timeout is in minutes; a falsy timeout yields a browser-session cookie.
    """
    # Set response cookie
    cookie = cherrypy.serving.response.cookie
    cookie[name] = cherrypy.serving.session.id
    morsel = cookie[name]
    morsel['path'] = (
        path or
        cherrypy.serving.request.headers.get(path_header) or
        '/'
    )
    if timeout:
        morsel['max-age'] = timeout * 60
        _add_MSIE_max_age_workaround(morsel, timeout)
    if domain is not None:
        morsel['domain'] = domain
    if secure:
        morsel['secure'] = 1
    if httponly:
        if not morsel.isReservedKey('httponly'):
            raise ValueError('The httponly cookie token is not supported.')
        morsel['httponly'] = 1
def _add_MSIE_max_age_workaround(cookie, timeout):
    """Also set 'expires' (timeout is in minutes) for browsers, such as
    older MSIE, that ignore max-age."""
    cookie['expires'] = httputil.HTTPDate(time.time() + timeout * 60)
def expire():
    """Expire the current session cookie by dating it one year in the past."""
    name = cherrypy.serving.request.config.get(
        'tools.sessions.name', 'session_id')
    one_year = 60 * 60 * 24 * 365
    morsel = cherrypy.serving.response.cookie[name]
    morsel['expires'] = httputil.HTTPDate(time.time() - one_year)
    # Drop any max-age so 'expires' alone governs the cookie's fate.
    morsel.pop('max-age', None)
| true | true |
f7286841416be878bd48fb75ac4def85bc0b75bf | 357 | py | Python | src/config.py | malberich/abtest_splitter | 0f66c0600462835b72ccbbf07484b49bb7b6006a | [
"MIT"
] | null | null | null | src/config.py | malberich/abtest_splitter | 0f66c0600462835b72ccbbf07484b49bb7b6006a | [
"MIT"
] | null | null | null | src/config.py | malberich/abtest_splitter | 0f66c0600462835b72ccbbf07484b49bb7b6006a | [
"MIT"
] | null | null | null | """Configuration file loader for the experiments configuration."""
import yaml
def load_config():
    """Load the experiments configuration from its YAML file.

    Returns the parsed configuration, or None when the YAML is malformed
    (the parse error is printed in that case).
    """
    with open('../conf/experiments.yaml', 'r') as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
            return None


# Loaded once at import time and shared by the rest of the app.
EXPERIMENTS = load_config()
| 23.8 | 66 | 0.64986 | import yaml
def load_config():
with open('../conf/experiments.yaml', 'r') as config_file:
try:
return yaml.safe_load(config_file)
except yaml.YAMLError as exc:
print(exc)
EXPERIMENTS = load_config()
| true | true |
f7286985bee169e43770e47d44b7e012e96e2726 | 393 | py | Python | multiple-languages/python/ros-cdk-rocketmq-1.0.3/src/ros_cdk_rocketmq/_jsii/__init__.py | aliyun/Resource-Orchestration-Service-Cloud-Development-K | 2b81e135002ed81cb72f7d07be7ff497ea39e2e1 | [
"Apache-2.0"
] | 15 | 2020-11-10T02:00:28.000Z | 2022-02-07T19:28:10.000Z | multiple-languages/python/ros-cdk-rocketmq-1.0.3/src/ros_cdk_rocketmq/_jsii/__init__.py | aliyun/Resource-Orchestration-Service-Cloud-Development-K | 2b81e135002ed81cb72f7d07be7ff497ea39e2e1 | [
"Apache-2.0"
] | 23 | 2021-02-02T04:37:02.000Z | 2022-03-31T06:41:06.000Z | multiple-languages/python/ros-cdk-rocketmq-1.0.3/src/ros_cdk_rocketmq/_jsii/__init__.py | aliyun/Resource-Orchestration-Service-Cloud-Development-K | 2b81e135002ed81cb72f7d07be7ff497ea39e2e1 | [
"Apache-2.0"
] | 4 | 2021-01-13T05:48:43.000Z | 2022-03-15T11:26:48.000Z | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
import constructs._jsii
import ros_cdk_core._jsii
# Load the compiled jsii assembly bundled with this package. The module name
# is this module's dotted path with the trailing "._jsii" (6 chars) stripped.
__jsii_assembly__ = jsii.JSIIAssembly.load(
    "@alicloud/ros-cdk-rocketmq",
    "1.0.3",
    __name__[0:-6],
    "ros-cdk-rocketmq@1.0.3.jsii.tgz",
)
# Explicit public API of this generated helper module.
__all__ = [
    "__jsii_assembly__",
]
# Hide all other members from documentation tooling.
publication.publish()
| 15.115385 | 43 | 0.740458 | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
import constructs._jsii
import ros_cdk_core._jsii
__jsii_assembly__ = jsii.JSIIAssembly.load(
"@alicloud/ros-cdk-rocketmq",
"1.0.3",
__name__[0:-6],
"ros-cdk-rocketmq@1.0.3.jsii.tgz",
)
__all__ = [
"__jsii_assembly__",
]
publication.publish()
| true | true |
f7286a036e0c3708992636385b8a410efb00c43b | 1,666 | py | Python | dualtext_server/dualtext_api/views/project_views.py | mathislucka/dualtext | 70cf7734cd66e60e482e2df09a3341224687526f | [
"MIT"
] | 1 | 2021-08-16T20:21:11.000Z | 2021-08-16T20:21:11.000Z | dualtext_server/dualtext_api/views/project_views.py | mathislucka/dualtext | 70cf7734cd66e60e482e2df09a3341224687526f | [
"MIT"
] | null | null | null | dualtext_server/dualtext_api/views/project_views.py | mathislucka/dualtext | 70cf7734cd66e60e482e2df09a3341224687526f | [
"MIT"
] | null | null | null | from django.shortcuts import get_object_or_404
from rest_framework import generics, status
from rest_framework.response import Response
from rest_framework.views import APIView
from dualtext_api.models import Project
from dualtext_api.serializers import ProjectSerializer
from dualtext_api.permissions import MembersReadAdminEdit, AuthenticatedReadAdminCreate
from dualtext_api.services import ProjectService
class ProjectListView(generics.ListCreateAPIView):
    """List projects visible to the requesting user, or create one (admins)."""

    serializer_class = ProjectSerializer
    permission_classes = [AuthenticatedReadAdminCreate]

    def perform_create(self, serializer):
        # Record the requesting user as the project's creator.
        serializer.save(creator=self.request.user)

    def get_queryset(self):
        """Superusers see all projects; others only those of their groups."""
        queryset = Project.objects.all().prefetch_related('corpora', 'allowed_groups')
        requester = self.request.user
        if requester.is_superuser:
            return queryset
        return queryset.filter(allowed_groups__in=requester.groups.all())
class ProjectDetailView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete one project (members read, admins edit)."""
    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
    permission_classes = [MembersReadAdminEdit]
    # URL kwarg carrying the project's primary key.
    lookup_url_kwarg = 'project_id'
class ProjectStatisticsView(APIView):
    """Expose aggregate statistics for a single project."""

    def get(self, request, project_id):
        """Return project statistics, or 403 if the user lacks access."""
        project = get_object_or_404(Project, id=project_id)
        checker = MembersReadAdminEdit()
        if not checker.has_object_permission(request, self, project):
            return Response('not permitted', status=status.HTTP_403_FORBIDDEN)
        service = ProjectService(project_id)
        return Response(service.get_project_statistics())
| 40.634146 | 87 | 0.762905 | from django.shortcuts import get_object_or_404
from rest_framework import generics, status
from rest_framework.response import Response
from rest_framework.views import APIView
from dualtext_api.models import Project
from dualtext_api.serializers import ProjectSerializer
from dualtext_api.permissions import MembersReadAdminEdit, AuthenticatedReadAdminCreate
from dualtext_api.services import ProjectService
class ProjectListView(generics.ListCreateAPIView):
serializer_class = ProjectSerializer
permission_classes = [AuthenticatedReadAdminCreate]
def perform_create(self, serializer):
serializer.save(creator=self.request.user)
def get_queryset(self):
user = self.request.user
queryset = Project.objects.all().prefetch_related('corpora', 'allowed_groups')
if not user.is_superuser:
user_groups = user.groups.all()
queryset = queryset.filter(allowed_groups__in=user_groups)
return queryset
class ProjectDetailView(generics.RetrieveUpdateDestroyAPIView):
queryset = Project.objects.all()
serializer_class = ProjectSerializer
permission_classes = [MembersReadAdminEdit]
lookup_url_kwarg = 'project_id'
class ProjectStatisticsView(APIView):
def get(self, request, project_id):
project = get_object_or_404(Project, id=project_id)
permission = MembersReadAdminEdit()
if permission.has_object_permission(request, self, project):
ps = ProjectService(project_id)
statistics = ps.get_project_statistics()
return Response(statistics)
return Response('not permitted', status=status.HTTP_403_FORBIDDEN)
| true | true |
f7286a73b5108fab8b3568f250e7067d363a006c | 3,724 | py | Python | advice-road-crop/roadnet/train_valid_split.py | fada-catec/advice_AI4EU_experiment | fa65f771102d299bfc84722bed44337271f944aa | [
"MIT"
] | 6 | 2020-12-05T09:03:31.000Z | 2022-03-04T13:14:32.000Z | roadnet/train_valid_split.py | linbaiwpi/RoadNet-RT | 624a1051e0502b60abe6122450ea53f80e9e4f8a | [
"MIT"
] | null | null | null | roadnet/train_valid_split.py | linbaiwpi/RoadNet-RT | 624a1051e0502b60abe6122450ea53f80e9e4f8a | [
"MIT"
] | null | null | null | import os
import shutil
import random
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from PIL import Image
random.seed(2020)

# Whether to crop the sky region off images during the split below.
IMG_CROP = True

# Convert gt_image_2 ground truth into gt_image label maps: magenta road
# pixels (RGB 255, 0, 255) become class id 7, everything else 0.
# NOTE(review): the original comment said road is assigned 255, but the code
# has always written 7 (the Cityscapes "road" train id); code kept as truth.
train_gt_path = "../../data_road/training/gt_image_2/"
save_gt_path = "../../data_road/training/gt_image/"
gt_list = [f for f in os.listdir(train_gt_path) if f.endswith('.png')]
try:
    shutil.rmtree(save_gt_path)
except OSError:
    pass
os.mkdir(save_gt_path)
# Only the "*_road_*" files get converted, so size the progress bar from the
# actual file count instead of the hard-coded 289 (KITTI training-set size).
road_gts = [g for g in gt_list if "road" in g]
pbar = tqdm(total=len(road_gts))
for gt in road_gts:
    img = np.array(Image.open(train_gt_path + gt))
    # Vectorized form of the original per-pixel double loop: a pixel is
    # "road" iff all its channels equal (255, 0, 255).
    # Assumes 3-channel RGB ground-truth images — TODO confirm.
    road_mask = np.all(img == np.array([255, 0, 255]), axis=-1)
    gtId = np.where(road_mask, 7, 0).astype(np.uint8)
    # "um_road_000000.png" -> "um_000000.png"
    gt_name = gt.split('_road_')
    Image.fromarray(gtId).save(save_gt_path + gt_name[0] + '_' + gt_name[1])
    pbar.update(1)
# split the training and validation data by 9:1
def traval_split(data_path, sub='um', seed=1):
    """Split one KITTI road category's files into train/validation (90/10).

    Parameters
    ----------
    data_path : str
        Directory containing the image files.
    sub : str
        Category prefix to select ('um', 'umm' or 'uu').
    seed : int
        Seed for the shuffle, making the split reproducible.

    Returns
    -------
    (train_list, valid_list) : two lists of file names.
    """
    random.seed(seed)
    # Match on the leading "<sub>_" prefix only, and sort before shuffling:
    # os.listdir order is arbitrary, so without sorting the seeded shuffle
    # would not actually make the split reproducible across runs/platforms.
    data_list = sorted(f for f in os.listdir(data_path)
                       if f.startswith(sub + '_'))
    train_len = round(len(data_list) * 0.9)
    random.shuffle(data_list)
    train_list = data_list[:train_len]
    valid_list = data_list[train_len:]
    return train_list, valid_list
# load path
img_src_path = '../../data_road/training/image_2/'
gt_src_path = '../../data_road/training/gt_image/'
# save path
base_dir = '../../data_road_3/'


def recreate_dir(path):
    """Delete *path* if present (ignoring errors) and create it afresh."""
    try:
        shutil.rmtree(path)
    except OSError:
        pass
    os.mkdir(path)


# Build the output tree from scratch (replaces eight copies of the same
# try/rmtree/except/mkdir boilerplate).
recreate_dir(base_dir)
recreate_dir(base_dir + 'training')
recreate_dir(base_dir + 'validation')

img_tra_path = base_dir + 'training/image/'
gt_tra_path = base_dir + 'training/gt_image/'
img_val_path = base_dir + 'validation/image/'
gt_val_path = base_dir + 'validation/gt_image/'
for out_dir in (img_tra_path, gt_tra_path, img_val_path, gt_val_path):
    recreate_dir(out_dir)

name_list = ['um', 'umm', 'uu']
def image_crop(img):
    """Crop away the top 45% of *img* (the sky region), keeping the rest."""
    width, height = img.size
    return img.crop((0, int(height * 0.45), width, height))
# Split each road category 90/10 and copy (optionally cropped) images and
# their ground-truth labels into the training/validation directories.
for name in name_list:
    train_list, valid_list = traval_split(img_src_path, sub=name)
    for valid_img in valid_list:
        if IMG_CROP:
            # Crop image and ground truth identically so they stay aligned.
            img = Image.open(img_src_path+valid_img)
            img_crop = image_crop(img)
            img_crop.save(img_val_path+valid_img)
            gt = Image.open(gt_src_path+valid_img)
            gt_crop = image_crop(gt)
            gt_crop.save(gt_val_path+valid_img)
        else:
            shutil.copy(img_src_path+valid_img, img_val_path+valid_img)
            shutil.copy(gt_src_path+valid_img, gt_val_path+valid_img)
    for train_img in train_list:
        if IMG_CROP:
            img = Image.open(img_src_path+train_img)
            img_crop = image_crop(img)
            img_crop.save(img_tra_path+train_img)
            gt = Image.open(gt_src_path+train_img)
            gt_crop = image_crop(gt)
            gt_crop.save(gt_tra_path+train_img)
        else:
            shutil.copy(img_src_path+train_img, img_tra_path+train_img)
            shutil.copy(gt_src_path+train_img, gt_tra_path+train_img)
| 26.985507 | 82 | 0.655478 | import os
import shutil
import random
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from PIL import Image
random.seed(2020)
IMG_CROP = True
train_gt_path = "../../data_road/training/gt_image_2/"
save_gt_path = "../../data_road/training/gt_image/"
gt_list = [f for f in os.listdir(train_gt_path) if f.endswith('.png')]
try:
shutil.rmtree(save_gt_path)
except OSError:
pass
os.mkdir(save_gt_path)
pbar = tqdm(total=289)
for gt in gt_list:
if "road" in gt:
img = np.array(Image.open(train_gt_path+gt))
height = img.shape[0]
width = img.shape[1]
gtId = np.zeros((height, width), dtype=np.uint8)
for i in range(height):
for j in range(width):
if sum(img[i, j, :] == [255, 0, 255]) == 3:
gtId[i, j] = 7
else:
gtId[i, j] = 0
gt_name = gt.split('_road_')
Image.fromarray(gtId).save(save_gt_path+gt_name[0]+'_'+gt_name[1])
pbar.update(1)
def traval_split(data_path, sub='um', seed=1):
random.seed(seed)
data_list = [f for f in os.listdir(data_path) if sub+'_' in f]
train_len = round(len(data_list)*0.9)
random.shuffle(data_list)
train_list = data_list[:train_len]
valid_list = data_list[train_len:]
return train_list, valid_list
img_src_path = '../../data_road/training/image_2/'
gt_src_path = '../../data_road/training/gt_image/'
base_dir = '../../data_road_3/'
try:
shutil.rmtree(base_dir)
except OSError:
pass
os.mkdir(base_dir)
try:
shutil.rmtree(base_dir+'training')
except OSError:
pass
os.mkdir(base_dir+'training')
try:
shutil.rmtree(base_dir+'validation')
except OSError:
pass
os.mkdir(base_dir+'validation')
img_tra_path = base_dir+'training/image/'
gt_tra_path = base_dir+'training/gt_image/'
img_val_path = base_dir+'validation/image/'
gt_val_path = base_dir+'validation/gt_image/'
try:
shutil.rmtree(img_tra_path)
except OSError:
pass
os.mkdir(img_tra_path)
try:
shutil.rmtree(gt_tra_path)
except OSError:
pass
os.mkdir(gt_tra_path)
try:
shutil.rmtree(img_val_path)
except OSError:
pass
os.mkdir(img_val_path)
try:
shutil.rmtree(gt_val_path)
except OSError:
pass
os.mkdir(gt_val_path)
name_list = ['um', 'umm', 'uu']
def image_crop(img):
return img.crop((0, int(img.size[1]*0.45), img.size[0], img.size[1]))
for name in name_list:
train_list, valid_list = traval_split(img_src_path, sub=name)
for valid_img in valid_list:
if IMG_CROP:
img = Image.open(img_src_path+valid_img)
img_crop = image_crop(img)
img_crop.save(img_val_path+valid_img)
gt = Image.open(gt_src_path+valid_img)
gt_crop = image_crop(gt)
gt_crop.save(gt_val_path+valid_img)
else:
shutil.copy(img_src_path+valid_img, img_val_path+valid_img)
shutil.copy(gt_src_path+valid_img, gt_val_path+valid_img)
for train_img in train_list:
if IMG_CROP:
img = Image.open(img_src_path+train_img)
img_crop = image_crop(img)
img_crop.save(img_tra_path+train_img)
gt = Image.open(gt_src_path+train_img)
gt_crop = image_crop(gt)
gt_crop.save(gt_tra_path+train_img)
else:
shutil.copy(img_src_path+train_img, img_tra_path+train_img)
shutil.copy(gt_src_path+train_img, gt_tra_path+train_img)
| true | true |
f7286ace2c69167571c07c9da6f8d95e7dbd792c | 526 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/cv01-33948 | 9c0e5b756a9742254ea82a6b71220e7c86397002 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/cv01-33948 | 9c0e5b756a9742254ea82a6b71220e7c86397002 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/cv01-33948 | 9c0e5b756a9742254ea82a6b71220e7c86397002 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.db import migrations
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites Site (pk=1) at our domain."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "cv01-33948.botics.co"
    site_params = {"name": "cv01"}
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    """Data migration seeding the default Site after the sites schema exists."""
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        # Forward-only: no reverse function is provided.
        migrations.RunPython(create_site),
    ]
| 20.230769 | 61 | 0.65019 | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "cv01-33948.botics.co"
site_params = {
"name": "cv01",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| true | true |
f7286b016acbeca564ced83432a01b33959f17d9 | 5,948 | py | Python | tests/dtypes_test.py | adler-j/jax | 3d7f884ccfe15da1b218903b37b255769223b4cf | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/dtypes_test.py | adler-j/jax | 3d7f884ccfe15da1b218903b37b255769223b4cf | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/dtypes_test.py | adler-j/jax | 3d7f884ccfe15da1b218903b37b255769223b4cf | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import operator
import unittest
import six
if six.PY3:
import enum
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import jax
from jax import dtypes
from jax import numpy as np
from jax import test_util as jtu
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
# Dtype groups exercised by the promotion tests below.
bool_dtypes = [onp.dtype('bool')]
signed_dtypes = [onp.dtype('int%d' % width) for width in (8, 16, 32, 64)]
unsigned_dtypes = [onp.dtype('uint%d' % width) for width in (8, 16, 32, 64)]
onp_float_dtypes = [onp.dtype('float%d' % width) for width in (16, 32, 64)]
# bfloat16 is JAX-specific and has no plain NumPy literal name.
float_dtypes = [onp.dtype(dtypes.bfloat16)] + onp_float_dtypes
complex_dtypes = [onp.dtype('complex%d' % width) for width in (64, 128)]
all_dtypes = (bool_dtypes + signed_dtypes + unsigned_dtypes + float_dtypes +
              complex_dtypes)
class DtypesTest(jtu.JaxTestCase):
  """Tests for JAX's dtype defaults and type-promotion lattice."""

  @parameterized.named_parameters(
    {"testcase_name": "_type={}".format(type.__name__), "type": type,
     "dtype": dtype}
    for type, dtype in [(bool, np.bool_), (int, np.int_), (float, np.float_),
                        (complex, np.complex_)])
  def testDefaultTypes(self, type, dtype):
    # Python scalars must map onto JAX's canonical default dtypes, both
    # eagerly and under jit. (NB: the parameter 'type' shadows the builtin.)
    for f in [np.array, jax.jit(np.array), jax.jit(lambda x: x)]:
      y = f(type(0))
      self.assertTrue(isinstance(y, np.ndarray), msg=(f, y))
      self.assertEqual(y.dtype, dtypes.canonicalize_dtype(dtype), msg=(f, y))

  @parameterized.named_parameters(
    {"testcase_name": "_swap={}_jit={}".format(swap, jit),
     "swap": swap, "jit": jit}
    for swap in [False, True] for jit in [False, True])
  @jtu.skip_on_devices("tpu")  # F16 not supported on TPU
  def testBinaryPromotion(self, swap, jit):
    # Each testcase is (lhs, rhs, expected result dtype of lhs + rhs);
    # 'swap' additionally checks that promotion is symmetric.
    testcases = [
      (np.array(1.), 0., np.float_),
      (np.array(1.), np.array(0.), np.float_),
      (np.array(1.), np.array(0., dtype=np.float16), np.float_),
      (np.array(1.), np.array(0., dtype=np.float32), np.float_),
      (np.array(1.), np.array(0., dtype=np.float64), np.float64),
      (np.array(1., dtype=np.float16), 0., np.float16),
      (np.array(1., dtype=np.float32), 0., np.float32),
      (np.array(1., dtype=np.float64), 0., np.float64),
      (np.array(1., dtype=np.float16), np.array(0., dtype=np.float16), np.float16),
      (np.array(1., dtype=np.float16), np.array(0., dtype=np.float32), np.float32),
      (np.array(1., dtype=np.float16), np.array(0., dtype=np.float64), np.float64),
      (np.array(1., dtype=np.float32), np.array(0., dtype=np.float32), np.float32),
      (np.array(1., dtype=np.float32), np.array(0., dtype=np.float64), np.float64),
      (np.array(1., dtype=np.float64), np.array(0., dtype=np.float64), np.float64),
      (np.array([1.]), 0., np.float_),
      (np.array([1.]), np.array(0.), np.float_),
      (np.array([1.]), np.array(0., dtype=np.float16), np.float_),
      (np.array([1.]), np.array(0., dtype=np.float32), np.float_),
      (np.array([1.]), np.array(0., dtype=np.float64), np.float64),
      (np.array([1.], dtype=np.float32), np.array(0., dtype=np.float16), np.float32),
      (np.array([1.], dtype=np.float16), np.array(0., dtype=np.float32), np.float32),
      (np.array([1.], dtype=np.float16), 0., np.float16),
    ]
    op = jax.jit(operator.add) if jit else operator.add
    for x, y, dtype in testcases:
      x, y = (y, x) if swap else (x, y)
      z = x + y
      self.assertTrue(isinstance(z, np.ndarray), msg=(x, y, z))
      self.assertEqual(z.dtype, dtypes.canonicalize_dtype(dtype), msg=(x, y, z))

  def testPromoteDtypes(self):
    # Every dtype is a fixed point with itself and with bool, and promotes
    # to complex128 against complex128.
    for t1 in all_dtypes:
      self.assertEqual(t1, dtypes.promote_types(t1, t1))
      self.assertEqual(t1, dtypes.promote_types(t1, onp.bool_))
      self.assertEqual(onp.dtype(onp.complex128),
                       dtypes.promote_types(t1, onp.complex128))
      for t2 in all_dtypes:
        # Symmetry
        self.assertEqual(dtypes.promote_types(t1, t2),
                         dtypes.promote_types(t2, t1))
    self.assertEqual(onp.dtype(onp.float32),
                     dtypes.promote_types(onp.float16, dtypes.bfloat16))
    # Promotions of non-inexact types against inexact types always prefer
    # the inexact types.
    for t in float_dtypes + complex_dtypes:
      for i in bool_dtypes + signed_dtypes + unsigned_dtypes:
        self.assertEqual(t, dtypes.promote_types(t, i))
    # Promotions between exact types, or between inexact types, match NumPy.
    for groups in [bool_dtypes + signed_dtypes + unsigned_dtypes,
                   onp_float_dtypes + complex_dtypes]:
      for t1, t2 in itertools.combinations(groups, 2):
        self.assertEqual(onp.promote_types(t1, t2),
                         dtypes.promote_types(t1, t2))

  @unittest.skipIf(six.PY2, "Test requires Python 3")
  def testEnumPromotion(self):
    # IntEnum members must behave like their integer values under NumPy
    # and JAX array/scalar construction.
    class AnEnum(enum.IntEnum):
      A = 42
      B = 101
    onp.testing.assert_equal(onp.array(42), onp.array(AnEnum.A))
    onp.testing.assert_equal(np.array(42), np.array(AnEnum.A))
    onp.testing.assert_equal(onp.int32(101), onp.int32(AnEnum.B))
    onp.testing.assert_equal(np.int32(101), np.int32(AnEnum.B))
if __name__ == "__main__":
absltest.main()
| 38.623377 | 85 | 0.654506 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import operator
import unittest
import six
if six.PY3:
import enum
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import jax
from jax import dtypes
from jax import numpy as np
from jax import test_util as jtu
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
bool_dtypes = [onp.dtype('bool')]
signed_dtypes = [onp.dtype('int8'), onp.dtype('int16'), onp.dtype('int32'),
onp.dtype('int64')]
unsigned_dtypes = [onp.dtype('uint8'), onp.dtype('uint16'), onp.dtype('uint32'),
onp.dtype('uint64')]
onp_float_dtypes = [onp.dtype('float16'), onp.dtype('float32'),
onp.dtype('float64')]
float_dtypes = [onp.dtype(dtypes.bfloat16)] + onp_float_dtypes
complex_dtypes = [onp.dtype('complex64'), onp.dtype('complex128')]
all_dtypes = (bool_dtypes + signed_dtypes + unsigned_dtypes + float_dtypes +
complex_dtypes)
class DtypesTest(jtu.JaxTestCase):
@parameterized.named_parameters(
{"testcase_name": "_type={}".format(type.__name__), "type": type,
"dtype": dtype}
for type, dtype in [(bool, np.bool_), (int, np.int_), (float, np.float_),
(complex, np.complex_)])
def testDefaultTypes(self, type, dtype):
for f in [np.array, jax.jit(np.array), jax.jit(lambda x: x)]:
y = f(type(0))
self.assertTrue(isinstance(y, np.ndarray), msg=(f, y))
self.assertEqual(y.dtype, dtypes.canonicalize_dtype(dtype), msg=(f, y))
@parameterized.named_parameters(
{"testcase_name": "_swap={}_jit={}".format(swap, jit),
"swap": swap, "jit": jit}
for swap in [False, True] for jit in [False, True])
@jtu.skip_on_devices("tpu")
def testBinaryPromotion(self, swap, jit):
testcases = [
(np.array(1.), 0., np.float_),
(np.array(1.), np.array(0.), np.float_),
(np.array(1.), np.array(0., dtype=np.float16), np.float_),
(np.array(1.), np.array(0., dtype=np.float32), np.float_),
(np.array(1.), np.array(0., dtype=np.float64), np.float64),
(np.array(1., dtype=np.float16), 0., np.float16),
(np.array(1., dtype=np.float32), 0., np.float32),
(np.array(1., dtype=np.float64), 0., np.float64),
(np.array(1., dtype=np.float16), np.array(0., dtype=np.float16), np.float16),
(np.array(1., dtype=np.float16), np.array(0., dtype=np.float32), np.float32),
(np.array(1., dtype=np.float16), np.array(0., dtype=np.float64), np.float64),
(np.array(1., dtype=np.float32), np.array(0., dtype=np.float32), np.float32),
(np.array(1., dtype=np.float32), np.array(0., dtype=np.float64), np.float64),
(np.array(1., dtype=np.float64), np.array(0., dtype=np.float64), np.float64),
(np.array([1.]), 0., np.float_),
(np.array([1.]), np.array(0.), np.float_),
(np.array([1.]), np.array(0., dtype=np.float16), np.float_),
(np.array([1.]), np.array(0., dtype=np.float32), np.float_),
(np.array([1.]), np.array(0., dtype=np.float64), np.float64),
(np.array([1.], dtype=np.float32), np.array(0., dtype=np.float16), np.float32),
(np.array([1.], dtype=np.float16), np.array(0., dtype=np.float32), np.float32),
(np.array([1.], dtype=np.float16), 0., np.float16),
]
op = jax.jit(operator.add) if jit else operator.add
for x, y, dtype in testcases:
x, y = (y, x) if swap else (x, y)
z = x + y
self.assertTrue(isinstance(z, np.ndarray), msg=(x, y, z))
self.assertEqual(z.dtype, dtypes.canonicalize_dtype(dtype), msg=(x, y, z))
def testPromoteDtypes(self):
for t1 in all_dtypes:
self.assertEqual(t1, dtypes.promote_types(t1, t1))
self.assertEqual(t1, dtypes.promote_types(t1, onp.bool_))
self.assertEqual(onp.dtype(onp.complex128),
dtypes.promote_types(t1, onp.complex128))
for t2 in all_dtypes:
self.assertEqual(dtypes.promote_types(t1, t2),
dtypes.promote_types(t2, t1))
self.assertEqual(onp.dtype(onp.float32),
dtypes.promote_types(onp.float16, dtypes.bfloat16))
for t in float_dtypes + complex_dtypes:
for i in bool_dtypes + signed_dtypes + unsigned_dtypes:
self.assertEqual(t, dtypes.promote_types(t, i))
for groups in [bool_dtypes + signed_dtypes + unsigned_dtypes,
onp_float_dtypes + complex_dtypes]:
for t1, t2 in itertools.combinations(groups, 2):
self.assertEqual(onp.promote_types(t1, t2),
dtypes.promote_types(t1, t2))
@unittest.skipIf(six.PY2, "Test requires Python 3")
def testEnumPromotion(self):
    """enum.IntEnum members should be accepted wherever plain ints are."""
    class AnEnum(enum.IntEnum):
        A = 42
        B = 101
    # both NumPy (onp) and jax.numpy (np) must treat enum members as ints
    onp.testing.assert_equal(onp.array(42), onp.array(AnEnum.A))
    onp.testing.assert_equal(np.array(42), np.array(AnEnum.A))
    onp.testing.assert_equal(onp.int32(101), onp.int32(AnEnum.B))
    onp.testing.assert_equal(np.int32(101), np.int32(AnEnum.B))
# Entry point when the test module is executed directly.
if __name__ == "__main__":
    absltest.main()
| true | true |
f7286b7177da4a0237e472b82963ce295b3c4ff0 | 9,956 | py | Python | journeylog/migrations/0001_initial_squashed_0003_auto_20181014_0556.py | soulweaver91/journeylog-be | ce3fd676d393605254fec3fd9727770d93b3609f | [
"MIT"
] | null | null | null | journeylog/migrations/0001_initial_squashed_0003_auto_20181014_0556.py | soulweaver91/journeylog-be | ce3fd676d393605254fec3fd9727770d93b3609f | [
"MIT"
] | 6 | 2019-04-28T23:32:30.000Z | 2021-09-18T23:21:51.000Z | journeylog/migrations/0001_initial_squashed_0003_auto_20181014_0556.py | soulweaver91/journeylog-be | ce3fd676d393605254fec3fd9727770d93b3609f | [
"MIT"
] | 1 | 2020-12-29T15:17:55.000Z | 2020-12-29T15:17:55.000Z | # Generated by Django 2.1 on 2018-10-14 02:57
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.functions.comparison
import re
class Migration(migrations.Migration):
    """Squashed initial schema for the ``journeylog`` app.

    Auto-generated by Django; stands in for migrations 0001-0003.  It
    creates the journal/journey/location/photo/tag tables and then applies
    the later tweaks (location-name sort key, nullable journal-page dates).
    Do not edit the operations by hand.
    """

    replaces = [('journeylog', '0001_initial'), ('journeylog', '0002_auto_20181014_0546'), ('journeylog', '0003_auto_20181014_0556')]

    dependencies = [
    ]

    operations = [
        # a single page of a journey's journal, ordered manually then by date
        migrations.CreateModel(
            name='JournalPage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('slug', models.SlugField(max_length=100, unique=True)),
                ('name', models.CharField(blank=True, max_length=100)),
                ('order_no', models.SmallIntegerField(default=0)),
                ('text', models.TextField(blank=True)),
                ('date_start', models.DateTimeField()),
                ('date_end', models.DateTimeField()),
                # comma-separated list of module ids hidden on this page
                ('disabled_modules', models.CharField(max_length=255, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')])),
            ],
            options={
                'ordering': ['order_no', 'date_start'],
            },
        ),
        # the journey (trip) itself
        migrations.CreateModel(
            name='Journey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('slug', models.SlugField(max_length=100, unique=True)),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True)),
                ('background', models.CharField(blank=True, max_length=240)),
                ('date_start', models.DateTimeField(blank=True, null=True)),
                ('date_end', models.DateTimeField(blank=True, null=True)),
            ],
            options={
                'ordering': ['date_start', 'name'],
            },
        ),
        # timestamped visit to a known Location during a journey
        migrations.CreateModel(
            name='JourneyLocationVisit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('timestamp', models.DateTimeField()),
                ('journey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='journeylog.Journey')),
            ],
            options={
                'ordering': ['timestamp'],
            },
        ),
        # raw GPS trackpoint for a journey's map
        migrations.CreateModel(
            name='JourneyMapPointVisit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('latitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9)),
                ('longitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9)),
                ('timestamp', models.DateTimeField()),
                ('journey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='journeylog.Journey')),
            ],
            options={
                'ordering': ['timestamp'],
            },
        ),
        # reusable named place with typed category and optional map color
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=100)),
                ('latitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9)),
                ('longitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9)),
                ('type', models.CharField(choices=[('Buildings', (('ARCADE', 'Arcade hall'), ('CAFE', 'Café'), ('HOTEL', 'Hotel'), ('INFO_CENTER', 'Information center'), ('KARAOKE', 'Karaoke'), ('MUSEUM', 'Museum'), ('RESTAURANT', 'Restaurant'), ('SHOP', 'Shop'))), ('Points of interest', (('MONUMENT', 'Monument'), ('MUSEUM', 'Museum'), ('PARK', 'Park'), ('SHRINE', 'Shrine'))), ('Transit points', (('AIRPORT', 'Airport'), ('BUS_STATION', 'Bus station'), ('RAILWAY_STATION', 'Railway station'), ('SUBWAY_STATION', 'Subway station'))), ('Traveling', (('ONBOARD_AIRPLANE', 'On an airplane'), ('ONBOARD_BUS', 'On a bus'), ('ONBOARD_SHIP', 'On a ship'))), ('Other', (('HOME', 'Home'), ('BUILDING', 'Other building'), ('PLACE', 'Other place')))], default='PLACE', max_length=100)),
                ('color', models.CharField(blank=True, max_length=6)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        # per-language display name for a Location
        migrations.CreateModel(
            name='LocationName',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('lang', models.CharField(max_length=10)),
                ('name', models.CharField(max_length=200)),
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='journeylog.Location')),
            ],
            options={
                'ordering': ['location', 'name'],
            },
        ),
        # photo with EXIF-derived metadata; file fields are non-editable
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=200)),
                ('latitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9)),
                ('longitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9)),
                ('description', models.TextField(blank=True)),
                ('timezone', models.CharField(max_length=50)),
                ('timestamp', models.DateTimeField()),
                ('filename', models.TextField(editable=False)),
                ('filesize', models.BigIntegerField(editable=False)),
                ('height', models.PositiveIntegerField(editable=False)),
                ('width', models.PositiveIntegerField(editable=False)),
                ('hash', models.CharField(editable=False, max_length=40)),
                ('camera_make', models.CharField(blank=True, max_length=100, null=True)),
                ('camera_model', models.CharField(blank=True, max_length=100, null=True)),
                ('focal_length', models.CharField(blank=True, max_length=20, null=True)),
                ('exposure', models.CharField(blank=True, max_length=20, null=True)),
                ('iso_speed', models.CharField(blank=True, max_length=20, null=True)),
                ('f_value', models.CharField(blank=True, max_length=20, null=True)),
                ('flash_fired', models.BooleanField(default=False)),
                ('flash_manual', models.BooleanField(default=False)),
                ('confidentiality', models.SmallIntegerField(default=0)),
                ('journey', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='journeylog.Journey')),
            ],
            options={
                'ordering': ['timestamp', 'name'],
            },
        ),
        # hierarchical tag (self-referencing parent)
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=200)),
                ('parent_tag', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='journeylog.Tag')),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        # cross-model fields added after all tables exist
        migrations.AddField(
            model_name='journeylocationvisit',
            name='location',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='journeylog.Location'),
        ),
        migrations.AddField(
            model_name='journalpage',
            name='journey',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='journeylog.Journey'),
        ),
        migrations.AlterUniqueTogether(
            name='locationname',
            unique_together={('location', 'lang')},
        ),
        # changes folded in from 0002 and 0003
        migrations.AddField(
            model_name='locationname',
            name='sort_key',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='journalpage',
            name='date_end',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='journalpage',
            name='date_start',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| 53.526882 | 777 | 0.57262 |
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.functions.comparison
import re
class Migration(migrations.Migration):
replaces = [('journeylog', '0001_initial'), ('journeylog', '0002_auto_20181014_0546'), ('journeylog', '0003_auto_20181014_0556')]
dependencies = [
]
operations = [
migrations.CreateModel(
name='JournalPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('slug', models.SlugField(max_length=100, unique=True)),
('name', models.CharField(blank=True, max_length=100)),
('order_no', models.SmallIntegerField(default=0)),
('text', models.TextField(blank=True)),
('date_start', models.DateTimeField()),
('date_end', models.DateTimeField()),
('disabled_modules', models.CharField(max_length=255, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')])),
],
options={
'ordering': ['order_no', 'date_start'],
},
),
migrations.CreateModel(
name='Journey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('slug', models.SlugField(max_length=100, unique=True)),
('name', models.CharField(max_length=100)),
('description', models.TextField(blank=True)),
('background', models.CharField(blank=True, max_length=240)),
('date_start', models.DateTimeField(blank=True, null=True)),
('date_end', models.DateTimeField(blank=True, null=True)),
],
options={
'ordering': ['date_start', 'name'],
},
),
migrations.CreateModel(
name='JourneyLocationVisit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField()),
('journey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='journeylog.Journey')),
],
options={
'ordering': ['timestamp'],
},
),
migrations.CreateModel(
name='JourneyMapPointVisit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('latitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9)),
('longitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9)),
('timestamp', models.DateTimeField()),
('journey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='journeylog.Journey')),
],
options={
'ordering': ['timestamp'],
},
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100)),
('latitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9)),
('longitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9)),
('type', models.CharField(choices=[('Buildings', (('ARCADE', 'Arcade hall'), ('CAFE', 'Café'), ('HOTEL', 'Hotel'), ('INFO_CENTER', 'Information center'), ('KARAOKE', 'Karaoke'), ('MUSEUM', 'Museum'), ('RESTAURANT', 'Restaurant'), ('SHOP', 'Shop'))), ('Points of interest', (('MONUMENT', 'Monument'), ('MUSEUM', 'Museum'), ('PARK', 'Park'), ('SHRINE', 'Shrine'))), ('Transit points', (('AIRPORT', 'Airport'), ('BUS_STATION', 'Bus station'), ('RAILWAY_STATION', 'Railway station'), ('SUBWAY_STATION', 'Subway station'))), ('Traveling', (('ONBOARD_AIRPLANE', 'On an airplane'), ('ONBOARD_BUS', 'On a bus'), ('ONBOARD_SHIP', 'On a ship'))), ('Other', (('HOME', 'Home'), ('BUILDING', 'Other building'), ('PLACE', 'Other place')))], default='PLACE', max_length=100)),
('color', models.CharField(blank=True, max_length=6)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='LocationName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('lang', models.CharField(max_length=10)),
('name', models.CharField(max_length=200)),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='journeylog.Location')),
],
options={
'ordering': ['location', 'name'],
},
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=200)),
('latitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9)),
('longitude', models.DecimalField(blank=True, decimal_places=6, max_digits=9)),
('description', models.TextField(blank=True)),
('timezone', models.CharField(max_length=50)),
('timestamp', models.DateTimeField()),
('filename', models.TextField(editable=False)),
('filesize', models.BigIntegerField(editable=False)),
('height', models.PositiveIntegerField(editable=False)),
('width', models.PositiveIntegerField(editable=False)),
('hash', models.CharField(editable=False, max_length=40)),
('camera_make', models.CharField(blank=True, max_length=100, null=True)),
('camera_model', models.CharField(blank=True, max_length=100, null=True)),
('focal_length', models.CharField(blank=True, max_length=20, null=True)),
('exposure', models.CharField(blank=True, max_length=20, null=True)),
('iso_speed', models.CharField(blank=True, max_length=20, null=True)),
('f_value', models.CharField(blank=True, max_length=20, null=True)),
('flash_fired', models.BooleanField(default=False)),
('flash_manual', models.BooleanField(default=False)),
('confidentiality', models.SmallIntegerField(default=0)),
('journey', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='journeylog.Journey')),
],
options={
'ordering': ['timestamp', 'name'],
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=200)),
('parent_tag', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='journeylog.Tag')),
],
options={
'ordering': ['name'],
},
),
migrations.AddField(
model_name='journeylocationvisit',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='journeylog.Location'),
),
migrations.AddField(
model_name='journalpage',
name='journey',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='journeylog.Journey'),
),
migrations.AlterUniqueTogether(
name='locationname',
unique_together={('location', 'lang')},
),
migrations.AddField(
model_name='locationname',
name='sort_key',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='journalpage',
name='date_end',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='journalpage',
name='date_start',
field=models.DateTimeField(blank=True, null=True),
),
]
| true | true |
f7286c36f71071c90ef94cac759a86bfa53d77c7 | 16,233 | py | Python | lisa/node.py | Abdelrahman0W/lisa | cd85034d83880b9fe6a50c7de817071a943ae39b | [
"MIT"
] | null | null | null | lisa/node.py | Abdelrahman0W/lisa | cd85034d83880b9fe6a50c7de817071a943ae39b | [
"MIT"
] | null | null | null | lisa/node.py | Abdelrahman0W/lisa | cd85034d83880b9fe6a50c7de817071a943ae39b | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
from pathlib import Path, PurePath, PurePosixPath, PureWindowsPath
from random import randint
from typing import Any, Dict, Iterable, List, Optional, Type, TypeVar, Union, cast
from lisa import schema
from lisa.executable import Tools
from lisa.feature import Features
from lisa.operating_system import OperatingSystem
from lisa.tools import Echo, Reboot
from lisa.util import (
ContextMixin,
InitializableMixin,
LisaException,
constants,
fields_to_dict,
subclasses,
)
from lisa.util.logger import get_logger
from lisa.util.process import ExecutableResult, Process
from lisa.util.shell import ConnectionInfo, LocalShell, Shell, SshShell
T = TypeVar("T")
class Node(subclasses.BaseClassWithRunbookMixin, ContextMixin, InitializableMixin):
    """A test target machine, local or remote.

    A node owns its shell connection, detected operating system, tool
    cache, and its log/working paths.  Subclasses decide how the shell is
    created (``LocalNode``/``RemoteNode``); the platform is expected to
    populate ``features`` before tests use it.
    """

    # shared factory used by Node.create to pick the subclass by runbook type
    _factory: Optional[subclasses.Factory[Node]] = None

    def __init__(
        self,
        runbook: schema.Node,
        index: int,
        logger_name: str,
        base_log_path: Optional[Path] = None,
    ) -> None:
        """Create a node from its runbook.

        index: platform-assigned position; a negative value means
        "not assigned" and is omitted from the logger id and log path.
        """
        super().__init__(runbook=runbook)
        self.is_default = runbook.is_default
        self.capability = runbook.capability
        self.name = runbook.name
        self.index = index
        self._shell: Optional[Shell] = None

        # will be initialized by platform
        self.features: Features
        self.tools = Tools(self)
        # the path uses remotely
        node_id = str(self.index) if self.index >= 0 else ""
        self.log = get_logger(logger_name, node_id)

        # The working path will be created in remote node, when it's used.
        self._working_path: Optional[PurePath] = None
        self._base_local_log_path = base_log_path
        # Not to set the log path until its first used. Because the path
        # contains node name, which is not set in __init__.
        self._local_log_path: Optional[Path] = None
        self._support_sudo: Optional[bool] = None

    @property
    def shell(self) -> Shell:
        """The underlying shell; raises if the subclass has not created it yet."""
        assert self._shell, "Shell is not initialized"
        return self._shell

    @property
    def is_posix(self) -> bool:
        """True for POSIX-like operating systems (triggers OS detection)."""
        self.initialize()
        return self.os.is_posix

    @property
    def is_remote(self) -> bool:
        raise NotImplementedError()

    @property
    def support_sudo(self) -> bool:
        """Whether ``sudo`` is available; probed once and cached.

        Non-POSIX nodes report True so sudo requests are not rejected.
        """
        self.initialize()
        # check if sudo supported
        if self.is_posix and self._support_sudo is None:
            process = self._execute("command -v sudo", shell=True, no_info_log=True)
            result = process.wait_result(10)
            if result.exit_code == 0:
                self._support_sudo = True
            else:
                self._support_sudo = False
                self.log.debug("node doesn't support sudo, may cause failure later.")
        if self._support_sudo is None:
            # set Windows to true to ignore sudo asks.
            self._support_sudo = True

        return self._support_sudo

    @property
    def is_connected(self) -> bool:
        """True when a shell exists and reports an open connection."""
        return self._shell is not None and self._shell.is_connected

    @property
    def local_log_path(self) -> Path:
        """Per-node log directory on the runner machine, created lazily.

        Unnamed nodes get ``node-<index>``; when no valid index was
        assigned, a random suffix is used to avoid collisions.
        """
        if not self._local_log_path:
            base_path = self._base_local_log_path
            if not base_path:
                base_path = constants.RUN_LOCAL_PATH
            path_name = self.name
            if not path_name:
                # Fix: use the platform-assigned index whenever it is valid
                # (>= 0), matching the node_id logic in __init__.  The old
                # truthiness test treated index 0 as missing (random name)
                # and the -1 sentinel as valid, yielding "node--1".
                if self.index >= 0:
                    index = self.index
                else:
                    index = randint(0, 10000)
                path_name = f"node-{index}"
            self._local_log_path = base_path / path_name
            if self._local_log_path.exists():
                raise LisaException(
                    "Conflicting node log path detected, "
                    "make sure LISA invocations have individual runtime paths."
                    f"'{self._local_log_path}'"
                )
            self._local_log_path.mkdir(parents=True)

        return self._local_log_path

    @property
    def working_path(self) -> PurePath:
        """
        The working path may be a remote path on remote node. It uses to put executable.
        """
        if not self._working_path:
            self._working_path = self._create_working_path()

            self.shell.mkdir(self._working_path, parents=True, exist_ok=True)
            self.log.debug(f"working path is: '{self._working_path}'")

        return self._working_path

    @classmethod
    def create(
        cls,
        index: int,
        runbook: schema.Node,
        logger_name: str = "node",
        base_log_path: Optional[Path] = None,
    ) -> Node:
        """Instantiate the Node subclass selected by the runbook's type."""
        if not cls._factory:
            cls._factory = subclasses.Factory[Node](Node)
        node = cls._factory.create_by_runbook(
            index=index,
            runbook=runbook,
            logger_name=logger_name,
            base_log_path=base_log_path,
        )

        node.log.debug(
            f"created, type: '{node.__class__.__name__}', default: {runbook.is_default}"
        )
        return node

    def reboot(self) -> None:
        """Reboot the node through the Reboot tool."""
        self.tools[Reboot].reboot()

    def execute(
        self,
        cmd: str,
        shell: bool = False,
        sudo: bool = False,
        no_error_log: bool = False,
        no_info_log: bool = True,
        cwd: Optional[PurePath] = None,
        timeout: int = 600,
    ) -> ExecutableResult:
        """Run a command and block until it finishes or *timeout* elapses."""
        process = self.execute_async(
            cmd,
            shell=shell,
            sudo=sudo,
            no_error_log=no_error_log,
            no_info_log=no_info_log,
            cwd=cwd,
        )
        return process.wait_result(timeout=timeout)

    def execute_async(
        self,
        cmd: str,
        shell: bool = False,
        sudo: bool = False,
        no_error_log: bool = False,
        no_info_log: bool = True,
        cwd: Optional[PurePath] = None,
    ) -> Process:
        """Start a command and return the running Process without waiting.

        Raises LisaException when sudo is requested but unavailable.
        """
        self.initialize()

        if sudo and not self.support_sudo:
            raise LisaException(
                f"node doesn't support [command] or [sudo], cannot execute: {cmd}"
            )

        return self._execute(
            cmd,
            shell=shell,
            sudo=sudo,
            no_error_log=no_error_log,
            no_info_log=no_info_log,
            cwd=cwd,
        )

    def close(self) -> None:
        """Close the shell connection, if one was created."""
        self.log.debug("closing node connection...")
        if self._shell:
            self._shell.close()

    def get_pure_path(self, path: str) -> PurePath:
        """Wrap *path* in the PurePath flavor matching the node's OS."""
        # spurplus doesn't support PurePath, so it needs to resolve by the
        # node's os here.
        if self.is_posix:
            return PurePosixPath(path)
        else:
            return PureWindowsPath(path)

    def _initialize(self, *args: Any, **kwargs: Any) -> None:
        # connect the shell and detect the operating system
        self.log.info(f"initializing node '{self.name}' {self}")
        self.shell.initialize()
        self.os: OperatingSystem = OperatingSystem.create(self)

    def _execute(
        self,
        cmd: str,
        shell: bool = False,
        sudo: bool = False,
        no_error_log: bool = False,
        no_info_log: bool = False,
        cwd: Optional[PurePath] = None,
    ) -> Process:
        # low-level launcher shared by execute/execute_async; the random id
        # only tags log lines of this process
        cmd_id = str(randint(0, 10000))
        process = Process(cmd_id, self.shell, parent_logger=self.log)
        process.start(
            cmd,
            shell=shell,
            sudo=sudo,
            no_error_log=no_error_log,
            no_info_log=no_info_log,
            cwd=cwd,
        )
        return process

    def _create_working_path(self) -> PurePath:
        raise NotImplementedError()
class RemoteNode(Node):
    """A node reached over SSH.

    Connection details must be supplied through ``set_connection_info`` (or
    ``set_connection_info_by_runbook``) before the node is initialized.
    """

    def __repr__(self) -> str:
        return str(self._connection_info)

    @property
    def is_remote(self) -> bool:
        return True

    @property
    def connection_info(self) -> Dict[str, Any]:
        # NOTE(review): the dict includes the password and private key file
        # path; callers should treat the result as sensitive.
        return fields_to_dict(
            self._connection_info,
            [
                constants.ENVIRONMENTS_NODES_REMOTE_ADDRESS,
                constants.ENVIRONMENTS_NODES_REMOTE_PORT,
                constants.ENVIRONMENTS_NODES_REMOTE_USERNAME,
                constants.ENVIRONMENTS_NODES_REMOTE_PASSWORD,
                constants.ENVIRONMENTS_NODES_REMOTE_PRIVATE_KEY_FILE,
            ],
            is_none_included=True,
        )

    @classmethod
    def type_name(cls) -> str:
        return constants.ENVIRONMENTS_NODES_REMOTE

    @classmethod
    def type_schema(cls) -> Type[schema.TypedSchema]:
        return schema.RemoteNode

    def set_connection_info_by_runbook(
        self,
        default_username: str = "",
        default_password: str = "",
        default_private_key_file: str = "",
    ) -> None:
        """Resolve connection info from the runbook, filling credential gaps
        with the given defaults (runbook values win)."""
        fields = [
            constants.ENVIRONMENTS_NODES_REMOTE_ADDRESS,
            constants.ENVIRONMENTS_NODES_REMOTE_PORT,
            constants.ENVIRONMENTS_NODES_REMOTE_PUBLIC_ADDRESS,
            constants.ENVIRONMENTS_NODES_REMOTE_PUBLIC_PORT,
        ]
        parameters = fields_to_dict(self.runbook, fields)

        # use default credential, if they are not specified
        node_runbook = cast(schema.RemoteNode, self.runbook)
        parameters[constants.ENVIRONMENTS_NODES_REMOTE_USERNAME] = (
            node_runbook.username if node_runbook.username else default_username
        )
        parameters[constants.ENVIRONMENTS_NODES_REMOTE_PASSWORD] = (
            node_runbook.password if node_runbook.password else default_password
        )
        parameters[constants.ENVIRONMENTS_NODES_REMOTE_PRIVATE_KEY_FILE] = (
            node_runbook.private_key_file
            if node_runbook.private_key_file
            else default_private_key_file
        )

        self.set_connection_info(**parameters)

    def set_connection_info(
        self,
        address: str = "",
        port: Optional[int] = 22,
        public_address: str = "",
        public_port: Optional[int] = 22,
        username: str = "root",
        password: str = "",
        private_key_file: str = "",
    ) -> None:
        """Set SSH endpoint and credentials; may only be called once.

        Internal and public address/port default to each other when only
        one side is given; the shell connects to the public endpoint.
        Raises LisaException when already set or when neither address nor
        public_address (or neither port) is provided.
        """
        if hasattr(self, "_connection_info"):
            raise LisaException(
                "node is set connection information already, cannot set again"
            )

        if not address and not public_address:
            raise LisaException(
                "at least one of address and public_address need to be set"
            )
        elif not address:
            address = public_address
        elif not public_address:
            public_address = address

        if not port and not public_port:
            raise LisaException("at least one of port and public_port need to be set")
        elif not port:
            port = public_port
        elif not public_port:
            public_port = port

        assert public_port
        assert port

        self._connection_info: ConnectionInfo = ConnectionInfo(
            public_address,
            public_port,
            username,
            password,
            private_key_file,
        )
        self._shell = SshShell(self._connection_info)
        self.public_address = public_address
        self.public_port = public_port
        self.internal_address = address
        self.internal_port = port

    def _initialize(self, *args: Any, **kwargs: Any) -> None:
        assert self._connection_info, "call setConnectionInfo before use remote node"
        super()._initialize(*args, **kwargs)

    def _create_working_path(self) -> PurePath:
        # working path lives under the user's home (POSIX) or %TEMP% (Windows)
        if self.is_posix:
            remote_root_path = Path("$HOME")
        else:
            remote_root_path = Path("%TEMP%")

        working_path = remote_root_path.joinpath(
            constants.PATH_REMOTE_ROOT, constants.RUN_LOGIC_PATH
        ).as_posix()

        # expand environment variables in path by echoing it on the node
        echo = self.tools[Echo]
        result = echo.run(working_path, shell=True)
        return self.get_pure_path(result.stdout)
class LocalNode(Node):
    """Node implementation backed by the machine running LISA itself.

    The shell is created eagerly in the constructor, since no connection
    setup is required for the local machine.
    """

    def __init__(
        self,
        runbook: schema.Node,
        index: int,
        logger_name: str,
        base_log_path: Optional[Path],
    ) -> None:
        super().__init__(
            runbook=runbook,
            index=index,
            logger_name=logger_name,
            base_log_path=base_log_path,
        )
        self._shell = LocalShell()

    def __repr__(self) -> str:
        return "local"

    @property
    def is_remote(self) -> bool:
        return False

    @classmethod
    def type_name(cls) -> str:
        return constants.ENVIRONMENTS_NODES_LOCAL

    @classmethod
    def type_schema(cls) -> Type[schema.TypedSchema]:
        return schema.LocalNode

    def _create_working_path(self) -> PurePath:
        # local runs share the run-local path; no expansion needed
        return constants.RUN_LOCAL_PATH
class Nodes:
    """Ordered collection of the nodes belonging to one environment.

    Supports lookup by integer index or by node name, exposes a lazily
    resolved default node, and offers factory helpers that append newly
    created nodes to the collection.
    """

    def __init__(self) -> None:
        super().__init__()
        self._default: Optional[Node] = None
        self._list: List[Node] = []

    def __getitem__(self, key: Union[int, str]) -> Node:
        if not self._list:
            raise LisaException("no node found")
        found: Optional[Node] = None
        if isinstance(key, int):
            # int lookup is positional; out-of-range falls through to KeyError
            if key < len(self._list):
                found = self._list[key]
        else:
            # string lookup matches the first node with that name
            found = next((n for n in self._list if n.name == key), None)
        if found is None:
            raise KeyError(f"cannot find node {key}")
        return found

    def __setitem__(self, key: Union[int, str], v: Node) -> None:
        raise NotImplementedError("don't set node directly, call from_*")

    def __len__(self) -> int:
        return len(self._list)

    @property
    def default(self) -> Node:
        """The node flagged ``is_default``, else the first node; cached."""
        if self._default is None:
            if len(self._list) == 0:
                raise LisaException("No node found in current environment")
            self._default = next(
                (n for n in self._list if n.is_default), self._list[0]
            )
        return self._default

    def list(self) -> Iterable[Node]:
        """Yield the nodes in insertion order."""
        yield from self._list

    def initialize(self) -> None:
        """Initialize every node in the collection."""
        for node in self.list():
            node.initialize()

    def close(self) -> None:
        """Close every node's connection."""
        for node in self.list():
            node.close()

    def from_existing(
        self,
        node_runbook: schema.Node,
        environment_name: str,
        base_log_path: Optional[Path] = None,
    ) -> Node:
        """Create a node from an explicit runbook and append it."""
        new_node = Node.create(
            index=len(self._list),
            runbook=node_runbook,
            logger_name=environment_name,
            base_log_path=base_log_path,
        )
        self._list.append(new_node)
        return new_node

    def from_requirement(
        self,
        node_requirement: schema.NodeSpace,
        environment_name: str,
        base_log_path: Optional[Path] = None,
    ) -> Node:
        """Create a remote node from a requirement's minimum capability."""
        min_requirement = cast(
            schema.Capability,
            node_requirement.generate_min_capability(node_requirement),
        )
        assert isinstance(min_requirement.node_count, int), (
            f"must be int after generate_min_capability, "
            f"actual: {min_requirement.node_count}"
        )
        # node count should be expanded in platform already
        assert min_requirement.node_count == 1, f"actual: {min_requirement.node_count}"
        mock_runbook = schema.RemoteNode(
            type=constants.ENVIRONMENTS_NODES_REMOTE,
            capability=min_requirement,
            is_default=node_requirement.is_default,
        )
        new_node = Node.create(
            index=len(self._list),
            runbook=mock_runbook,
            logger_name=environment_name,
            base_log_path=base_log_path,
        )
        self._list.append(new_node)
        return new_node
def quick_connect(runbook: schema.Node, logger_name: str = "", index: int = -1) -> Node:
    """Create a node from a runbook and initialize its connection.

    Convenience helper for one-off usage outside an environment: remote
    nodes get their connection info resolved from the runbook before the
    connection is established.
    """
    node = Node.create(index, runbook, logger_name=logger_name)
    if isinstance(node, RemoteNode):
        node.set_connection_info_by_runbook()
    node.initialize()

    return node
| 30.513158 | 88 | 0.595084 |
from __future__ import annotations
from pathlib import Path, PurePath, PurePosixPath, PureWindowsPath
from random import randint
from typing import Any, Dict, Iterable, List, Optional, Type, TypeVar, Union, cast
from lisa import schema
from lisa.executable import Tools
from lisa.feature import Features
from lisa.operating_system import OperatingSystem
from lisa.tools import Echo, Reboot
from lisa.util import (
ContextMixin,
InitializableMixin,
LisaException,
constants,
fields_to_dict,
subclasses,
)
from lisa.util.logger import get_logger
from lisa.util.process import ExecutableResult, Process
from lisa.util.shell import ConnectionInfo, LocalShell, Shell, SshShell
T = TypeVar("T")
class Node(subclasses.BaseClassWithRunbookMixin, ContextMixin, InitializableMixin):
_factory: Optional[subclasses.Factory[Node]] = None
def __init__(
self,
runbook: schema.Node,
index: int,
logger_name: str,
base_log_path: Optional[Path] = None,
) -> None:
super().__init__(runbook=runbook)
self.is_default = runbook.is_default
self.capability = runbook.capability
self.name = runbook.name
self.index = index
self._shell: Optional[Shell] = None
self.features: Features
self.tools = Tools(self)
node_id = str(self.index) if self.index >= 0 else ""
self.log = get_logger(logger_name, node_id)
self._working_path: Optional[PurePath] = None
self._base_local_log_path = base_log_path
# Not to set the log path until its first used. Because the path
# contains node name, which is not set in __init__.
self._local_log_path: Optional[Path] = None
self._support_sudo: Optional[bool] = None
@property
def shell(self) -> Shell:
assert self._shell, "Shell is not initialized"
return self._shell
@property
def is_posix(self) -> bool:
self.initialize()
return self.os.is_posix
@property
def is_remote(self) -> bool:
raise NotImplementedError()
@property
def support_sudo(self) -> bool:
self.initialize()
# check if sudo supported
if self.is_posix and self._support_sudo is None:
process = self._execute("command -v sudo", shell=True, no_info_log=True)
result = process.wait_result(10)
if result.exit_code == 0:
self._support_sudo = True
else:
self._support_sudo = False
self.log.debug("node doesn't support sudo, may cause failure later.")
if self._support_sudo is None:
self._support_sudo = True
return self._support_sudo
@property
def is_connected(self) -> bool:
return self._shell is not None and self._shell.is_connected
@property
def local_log_path(self) -> Path:
if not self._local_log_path:
base_path = self._base_local_log_path
if not base_path:
base_path = constants.RUN_LOCAL_PATH
path_name = self.name
if not path_name:
if self.index:
index = self.index
else:
index = randint(0, 10000)
path_name = f"node-{index}"
self._local_log_path = base_path / path_name
if self._local_log_path.exists():
raise LisaException(
"Conflicting node log path detected, "
"make sure LISA invocations have individual runtime paths."
f"'{self._local_log_path}'"
)
self._local_log_path.mkdir(parents=True)
return self._local_log_path
@property
def working_path(self) -> PurePath:
    """Scratch directory on the node, created on first access."""
    if not self._working_path:
        self._working_path = self._create_working_path()
        # Ensure the directory exists on the node before handing it out.
        self.shell.mkdir(self._working_path, parents=True, exist_ok=True)
        self.log.debug(f"working path is: '{self._working_path}'")
    return self._working_path
@classmethod
def create(
    cls,
    index: int,
    runbook: schema.Node,
    logger_name: str = "node",
    base_log_path: Optional[Path] = None,
) -> Node:
    """Factory: instantiate the Node subclass registered for *runbook*'s type.

    The subclass factory is created lazily on first use and shared by all
    subsequent calls.
    """
    if not cls._factory:
        cls._factory = subclasses.Factory[Node](Node)
    node = cls._factory.create_by_runbook(
        index=index,
        runbook=runbook,
        logger_name=logger_name,
        base_log_path=base_log_path,
    )
    node.log.debug(
        f"created, type: '{node.__class__.__name__}', default: {runbook.is_default}"
    )
    return node
def reboot(self) -> None:
    """Reboot the node through the `Reboot` tool.

    NOTE(review): whether this waits for the node to reconnect depends on
    the Reboot tool's implementation, which is not visible here -- confirm.
    """
    self.tools[Reboot].reboot()
def execute(
    self,
    cmd: str,
    shell: bool = False,
    sudo: bool = False,
    no_error_log: bool = False,
    no_info_log: bool = True,
    cwd: Optional[PurePath] = None,
    timeout: int = 600,
) -> ExecutableResult:
    """Run *cmd* on the node and wait up to *timeout* seconds for its result.

    Thin synchronous wrapper over `execute_async`; all other keyword
    arguments are forwarded unchanged.
    """
    process = self.execute_async(
        cmd,
        shell=shell,
        sudo=sudo,
        no_error_log=no_error_log,
        no_info_log=no_info_log,
        cwd=cwd,
    )
    return process.wait_result(timeout=timeout)
def execute_async(
    self,
    cmd: str,
    shell: bool = False,
    sudo: bool = False,
    no_error_log: bool = False,
    no_info_log: bool = True,
    cwd: Optional[PurePath] = None,
) -> Process:
    """Start *cmd* on the node without waiting for completion.

    Raises
    ------
    LisaException
        If ``sudo`` was requested but the node does not support it.
    """
    self.initialize()
    if sudo and not self.support_sudo:
        raise LisaException(
            f"node doesn't support [command] or [sudo], cannot execute: {cmd}"
        )
    return self._execute(
        cmd,
        shell=shell,
        sudo=sudo,
        no_error_log=no_error_log,
        no_info_log=no_info_log,
        cwd=cwd,
    )
def close(self) -> None:
    """Close the underlying shell connection, if one was created."""
    self.log.debug("closing node connection...")
    if self._shell:
        self._shell.close()
def get_pure_path(self, path: str) -> PurePath:
    """Wrap *path* in the pure-path flavor matching this node's OS.

    spurplus cannot consume a generic ``PurePath``, so the concrete
    POSIX/Windows flavor is chosen based on the node's operating system.
    """
    flavor = PurePosixPath if self.is_posix else PureWindowsPath
    return flavor(path)
def _initialize(self, *args: Any, **kwargs: Any) -> None:
    """First-use initialization: connect the shell and detect the OS."""
    self.log.info(f"initializing node '{self.name}' {self}")
    self.shell.initialize()
    self.os: OperatingSystem = OperatingSystem.create(self)
def _execute(
    self,
    cmd: str,
    shell: bool = False,
    sudo: bool = False,
    no_error_log: bool = False,
    no_info_log: bool = False,
    cwd: Optional[PurePath] = None,
) -> Process:
    """Start *cmd* in a new `Process` bound to this node's shell.

    A random id is attached as the process logger's name so concurrent
    commands can be told apart in the logs.
    """
    cmd_id = str(randint(0, 10000))
    process = Process(cmd_id, self.shell, parent_logger=self.log)
    process.start(
        cmd,
        shell=shell,
        sudo=sudo,
        no_error_log=no_error_log,
        no_info_log=no_info_log,
        cwd=cwd,
    )
    return process
def _create_working_path(self) -> PurePath:
    """Compute the node-specific working directory; implemented by subclasses."""
    raise NotImplementedError()
class RemoteNode(Node):
    """A node reached over SSH.

    Connection parameters must be supplied via `set_connection_info` (or
    `set_connection_info_by_runbook`) before the node is initialized.
    """

    def __repr__(self) -> str:
        return str(self._connection_info)

    @property
    def is_remote(self) -> bool:
        return True

    @property
    def connection_info(self) -> Dict[str, Any]:
        """Connection parameters as a plain dict (None values included)."""
        return fields_to_dict(
            self._connection_info,
            [
                constants.ENVIRONMENTS_NODES_REMOTE_ADDRESS,
                constants.ENVIRONMENTS_NODES_REMOTE_PORT,
                constants.ENVIRONMENTS_NODES_REMOTE_USERNAME,
                constants.ENVIRONMENTS_NODES_REMOTE_PASSWORD,
                constants.ENVIRONMENTS_NODES_REMOTE_PRIVATE_KEY_FILE,
            ],
            is_none_included=True,
        )

    @classmethod
    def type_name(cls) -> str:
        return constants.ENVIRONMENTS_NODES_REMOTE

    @classmethod
    def type_schema(cls) -> Type[schema.TypedSchema]:
        return schema.RemoteNode

    def set_connection_info_by_runbook(
        self,
        default_username: str = "",
        default_password: str = "",
        default_private_key_file: str = "",
    ) -> None:
        """Fill connection info from the runbook, falling back to the given
        defaults for credentials that the runbook does not specify.
        """
        fields = [
            constants.ENVIRONMENTS_NODES_REMOTE_ADDRESS,
            constants.ENVIRONMENTS_NODES_REMOTE_PORT,
            constants.ENVIRONMENTS_NODES_REMOTE_PUBLIC_ADDRESS,
            constants.ENVIRONMENTS_NODES_REMOTE_PUBLIC_PORT,
        ]
        parameters = fields_to_dict(self.runbook, fields)
        # use default credential, if they are not specified
        node_runbook = cast(schema.RemoteNode, self.runbook)
        parameters[constants.ENVIRONMENTS_NODES_REMOTE_USERNAME] = (
            node_runbook.username if node_runbook.username else default_username
        )
        parameters[constants.ENVIRONMENTS_NODES_REMOTE_PASSWORD] = (
            node_runbook.password if node_runbook.password else default_password
        )
        parameters[constants.ENVIRONMENTS_NODES_REMOTE_PRIVATE_KEY_FILE] = (
            node_runbook.private_key_file
            if node_runbook.private_key_file
            else default_private_key_file
        )
        self.set_connection_info(**parameters)

    def set_connection_info(
        self,
        address: str = "",
        port: Optional[int] = 22,
        public_address: str = "",
        public_port: Optional[int] = 22,
        username: str = "root",
        password: str = "",
        private_key_file: str = "",
    ) -> None:
        """Record SSH connection parameters and create the shell.

        Internal and public address/port mirror each other when only one
        side is given. May be called at most once per node.
        """
        if hasattr(self, "_connection_info"):
            raise LisaException(
                "node is set connection information already, cannot set again"
            )
        if not address and not public_address:
            raise LisaException(
                "at least one of address and public_address need to be set"
            )
        elif not address:
            address = public_address
        elif not public_address:
            public_address = address
        if not port and not public_port:
            raise LisaException("at least one of port and public_port need to be set")
        elif not port:
            port = public_port
        elif not public_port:
            public_port = port
        assert public_port
        assert port
        # The shell always connects through the public endpoint.
        self._connection_info: ConnectionInfo = ConnectionInfo(
            public_address,
            public_port,
            username,
            password,
            private_key_file,
        )
        self._shell = SshShell(self._connection_info)
        self.public_address = public_address
        self.public_port = public_port
        self.internal_address = address
        self.internal_port = port

    def _initialize(self, *args: Any, **kwargs: Any) -> None:
        assert self._connection_info, "call setConnectionInfo before use remote node"
        super()._initialize(*args, **kwargs)

    def _create_working_path(self) -> PurePath:
        """Build the remote working path under $HOME (POSIX) or %TEMP% (Windows)."""
        if self.is_posix:
            remote_root_path = Path("$HOME")
        else:
            remote_root_path = Path("%TEMP%")
        working_path = remote_root_path.joinpath(
            constants.PATH_REMOTE_ROOT, constants.RUN_LOGIC_PATH
        ).as_posix()
        # expand environment variables in path
        echo = self.tools[Echo]
        result = echo.run(working_path, shell=True)
        return self.get_pure_path(result.stdout)
class LocalNode(Node):
    """A node that runs commands on the machine hosting LISA itself."""

    def __init__(
        self,
        runbook: schema.Node,
        index: int,
        logger_name: str,
        base_log_path: Optional[Path],
    ) -> None:
        super().__init__(
            index=index,
            runbook=runbook,
            logger_name=logger_name,
            base_log_path=base_log_path,
        )
        # A local shell is available immediately; no connection step needed.
        self._shell = LocalShell()

    @property
    def is_remote(self) -> bool:
        return False

    @classmethod
    def type_name(cls) -> str:
        return constants.ENVIRONMENTS_NODES_LOCAL

    @classmethod
    def type_schema(cls) -> Type[schema.TypedSchema]:
        return schema.LocalNode

    def _create_working_path(self) -> PurePath:
        return constants.RUN_LOCAL_PATH

    def __repr__(self) -> str:
        return "local"
class Nodes:
    """Mutable collection of `Node` instances belonging to one environment.

    Supports lookup by position or by node name, resolves and caches the
    default node, and offers bulk initialize/close plus factory helpers
    that append newly created nodes.
    """

    def __init__(self) -> None:
        super().__init__()
        # Cached default node; resolved lazily on first access.
        self._default: Optional[Node] = None
        self._list: List[Node] = []

    def __getitem__(self, key: Union[int, str]) -> Node:
        """Look up a node by index (int) or by name (str)."""
        if not self._list:
            raise LisaException("no node found")
        matched = None
        if isinstance(key, int):
            if len(self._list) > key:
                matched = self._list[key]
        else:
            matched = next(
                (node for node in self._list if node.name == key), None
            )
        if not matched:
            raise KeyError(f"cannot find node {key}")
        return matched

    def __setitem__(self, key: Union[int, str], v: Node) -> None:
        raise NotImplementedError("don't set node directly, call from_*")

    def __len__(self) -> int:
        return len(self._list)

    @property
    def default(self) -> Node:
        """First node flagged ``is_default``, otherwise the first node."""
        if self._default is None:
            if len(self._list) == 0:
                raise LisaException("No node found in current environment")
            flagged = next(
                (node for node in self._list if node.is_default), None
            )
            self._default = flagged if flagged is not None else self._list[0]
        return self._default

    def list(self) -> Iterable[Node]:
        """Iterate over all nodes in insertion order."""
        yield from self._list

    def initialize(self) -> None:
        """Initialize every node in the collection."""
        for node in self._list:
            node.initialize()

    def close(self) -> None:
        """Close every node's connection."""
        for node in self._list:
            node.close()

    def from_existing(
        self,
        node_runbook: schema.Node,
        environment_name: str,
        base_log_path: Optional[Path] = None,
    ) -> Node:
        """Create a node from an explicit runbook entry and register it."""
        node = Node.create(
            index=len(self._list),
            runbook=node_runbook,
            logger_name=environment_name,
            base_log_path=base_log_path,
        )
        self._list.append(node)
        return node

    def from_requirement(
        self,
        node_requirement: schema.NodeSpace,
        environment_name: str,
        base_log_path: Optional[Path] = None,
    ) -> Node:
        """Create a remote node from a capability requirement and register it."""
        min_requirement = cast(
            schema.Capability,
            node_requirement.generate_min_capability(node_requirement),
        )
        # A single requirement entry must describe exactly one node here.
        assert isinstance(min_requirement.node_count, int), (
            f"must be int after generate_min_capability, "
            f"actual: {min_requirement.node_count}"
        )
        assert min_requirement.node_count == 1, f"actual: {min_requirement.node_count}"
        mock_runbook = schema.RemoteNode(
            type=constants.ENVIRONMENTS_NODES_REMOTE,
            capability=min_requirement,
            is_default=node_requirement.is_default,
        )
        node = Node.create(
            index=len(self._list),
            runbook=mock_runbook,
            logger_name=environment_name,
            base_log_path=base_log_path,
        )
        self._list.append(node)
        return node
def quick_connect(runbook: schema.Node, logger_name: str = "", index: int = -1) -> Node:
    """Create, connect, and initialize a node from *runbook* in one step.

    Remote nodes take their connection details straight from the runbook.
    The caller is responsible for closing the returned node.
    """
    node = Node.create(index, runbook, logger_name=logger_name)
    if isinstance(node, RemoteNode):
        node.set_connection_info_by_runbook()
    node.initialize()
    return node
| true | true |
f7286c4cfe10efa0c634c024f8dd2ceb244da2ab | 1,598 | py | Python | src/ad/admin.py | mio-eldar/ads-board | f53a5aabcff5ba8cab4e92f58baaf5017506bbd4 | [
"MIT"
] | null | null | null | src/ad/admin.py | mio-eldar/ads-board | f53a5aabcff5ba8cab4e92f58baaf5017506bbd4 | [
"MIT"
] | null | null | null | src/ad/admin.py | mio-eldar/ads-board | f53a5aabcff5ba8cab4e92f58baaf5017506bbd4 | [
"MIT"
] | null | null | null | from django.contrib import admin
from imagekit.admin import AdminThumbnail
from mptt.admin import DraggableMPTTAdmin
from ad.models import City, \
Category, AdvertisementImage, Advertisement
class SubCategoryInline(admin.TabularInline):
    """Inline editor for a category's sub-categories."""

    model = Category
    extra = 14  # number of empty sub-category rows shown by default
    exclude = ('icon', 'slug')
class CategoryAdmin(DraggableMPTTAdmin):
    """Tree-aware admin for categories; sub-categories are edited inline."""

    fieldsets = (
        ('Информация', {'fields': ('title', 'parent', 'icon', 'slug')}),
    )
    inlines = (SubCategoryInline,)
class AdvertisementImageInline(admin.TabularInline):
    """Inline editor for the images attached to an advertisement."""

    model = AdvertisementImage
    extra = 3  # number of empty image rows shown by default
    fieldsets = (
        ('Основные поля', {'fields': ('id', 'image') }),
    )
class AdvertisementAdmin(admin.ModelAdmin):
    """Admin for advertisements with inline image management."""

    inlines = (AdvertisementImageInline,)
    fieldsets = (
        ('Основные поля', {'fields': ('category', 'description', 'price', 'currency', 'city') }),
        ('Контакты', {'fields': ('author', 'phone_number', 'hide_phone_number', 'email')}),
        ('Остальное', {'fields': ('likes', 'views', 'created_at', 'updated_at')})
    )
    readonly_fields = ('created_at','updated_at')
    list_display = ('id', 'description','category', 'count_images')

    def count_images(self, obj):
        # Computed list-view column: number of images attached to the ad.
        return obj.images.count()
class CityAdmin(admin.ModelAdmin):
    """Default admin for cities; no customization needed."""

    pass
admin.site.register(City, CityAdmin)
admin.site.register(Advertisement, AdvertisementAdmin)
admin.site.register(
Category,
CategoryAdmin,
list_display=(
'tree_actions',
'indented_title',
'icon',
'slug'
),
list_display_links=(
'indented_title',
)) | 26.633333 | 97 | 0.645807 | from django.contrib import admin
from imagekit.admin import AdminThumbnail
from mptt.admin import DraggableMPTTAdmin
from ad.models import City, \
Category, AdvertisementImage, Advertisement
class SubCategoryInline(admin.TabularInline):
model = Category
extra = 14
exclude = ('icon', 'slug')
class CategoryAdmin(DraggableMPTTAdmin):
fieldsets = (
('Информация', {'fields': ('title', 'parent', 'icon', 'slug')}),
)
inlines = (SubCategoryInline,)
class AdvertisementImageInline(admin.TabularInline):
model = AdvertisementImage
extra = 3
fieldsets = (
('Основные поля', {'fields': ('id', 'image') }),
)
class AdvertisementAdmin(admin.ModelAdmin):
inlines = (AdvertisementImageInline,)
fieldsets = (
('Основные поля', {'fields': ('category', 'description', 'price', 'currency', 'city') }),
('Контакты', {'fields': ('author', 'phone_number', 'hide_phone_number', 'email')}),
('Остальное', {'fields': ('likes', 'views', 'created_at', 'updated_at')})
)
readonly_fields = ('created_at','updated_at')
list_display = ('id', 'description','category', 'count_images')
def count_images(self, obj):
return obj.images.count()
class CityAdmin(admin.ModelAdmin):
pass
admin.site.register(City, CityAdmin)
admin.site.register(Advertisement, AdvertisementAdmin)
admin.site.register(
Category,
CategoryAdmin,
list_display=(
'tree_actions',
'indented_title',
'icon',
'slug'
),
list_display_links=(
'indented_title',
)) | true | true |
f7286cc0f043e840d3c07edbdea02c22e3394f40 | 1,049 | py | Python | unleash/plugins/footer.py | mbr/unleash | f36c6e6600868bc054f5b8d4cf1c03ea8eb8da4c | [
"MIT"
] | 5 | 2015-05-29T21:51:44.000Z | 2019-09-16T16:59:46.000Z | unleash/plugins/footer.py | mbr/unleash | f36c6e6600868bc054f5b8d4cf1c03ea8eb8da4c | [
"MIT"
] | null | null | null | unleash/plugins/footer.py | mbr/unleash | f36c6e6600868bc054f5b8d4cf1c03ea8eb8da4c | [
"MIT"
] | null | null | null | from click import Option
from unleash import opts, log, commit
from unleash import __version__ as unleash_version
PLUGIN_NAME = 'footer'

# make sure version info is written first, so the footer does not get
# overwritten
PLUGIN_DEPENDS = ['versions']

# Template for the footer appended to the commit message; the placeholder
# receives the running unleash version.
FOOTER_FORMAT = u'\n[commit by unleash {}]\n'
def setup(cli):
    """Register this plugin's command-line flags on the ``release`` command."""
    release_params = cli.commands['release'].params
    release_params.append(Option(
        ['--footer/--no-footer', '-f/-F'],
        default=False,
        help='Add "created by unleash" footer (default: disabled).',
    ))
    release_params.append(Option(
        ['--unleash-committer/--no-unleash-committer', '-c/-C'],
        default=True,
        help='Set the committer to unleash (default: enabled).',
    ))
def prepare_release():
    """Optionally stamp the pending commit as produced by unleash."""
    wants_footer = opts['footer']
    wants_committer = opts['unleash_committer']
    if not (wants_footer or wants_committer):
        return
    log.info('Marking release as released by unleash')
    if wants_footer:
        commit.message += FOOTER_FORMAT.format(unleash_version)
    if wants_committer:
        commit.committer = 'unleash <{}>'.format(unleash_version)
| 27.605263 | 77 | 0.673022 | from click import Option
from unleash import opts, log, commit
from unleash import __version__ as unleash_version
PLUGIN_NAME = 'footer'
PLUGIN_DEPENDS = ['versions']
FOOTER_FORMAT = u'\n[commit by unleash {}]\n'
def setup(cli):
cli.commands['release'].params.append(Option(
['--footer/--no-footer', '-f/-F'], default=False,
help='Add "created by unleash" footer (default: disabled).'
))
cli.commands['release'].params.append(Option(
['--unleash-committer/--no-unleash-committer', '-c/-C'],
default=True, help='Set the committer to unleash (default: enabled).'
))
def prepare_release():
if not opts['footer'] and not opts['unleash_committer']:
return
log.info('Marking release as released by unleash')
if opts['footer']:
commit.message += FOOTER_FORMAT.format(unleash_version)
if opts['unleash_committer']:
commit.committer = 'unleash <{}>'.format(unleash_version)
| true | true |
f7286cf83d441069fec4a82586b6aab0a0b7848e | 16,514 | py | Python | scipy/linalg/lapack.py | ZHG2017/scipy | 4fe52aed1ea375c2aa7a2a6b634787c9abdf32e3 | [
"BSD-3-Clause"
] | 1 | 2021-05-22T21:07:15.000Z | 2021-05-22T21:07:15.000Z | scipy/linalg/lapack.py | ZHG2017/scipy | 4fe52aed1ea375c2aa7a2a6b634787c9abdf32e3 | [
"BSD-3-Clause"
] | 1 | 2021-04-03T20:19:36.000Z | 2021-04-03T20:19:36.000Z | scipy/linalg/lapack.py | YarivLevy81/scipy | 859c1061b3d5aa30c4466824049d69edde5499a2 | [
"BSD-3-Clause"
] | 1 | 2020-06-28T00:46:20.000Z | 2020-06-28T00:46:20.000Z | """
Low-level LAPACK functions (:mod:`scipy.linalg.lapack`)
=======================================================
This module contains low-level functions from the LAPACK library.
The `*gegv` family of routines have been removed from LAPACK 3.6.0
and have been deprecated in SciPy 0.17.0. They will be removed in
a future release.
.. versionadded:: 0.12.0
.. note::
The common ``overwrite_<>`` option in many routines, allows the
input arrays to be overwritten to avoid extra memory allocation.
However this requires the array to satisfy two conditions
which are memory order and the data type to match exactly the
order and the type expected by the routine.
As an example, if you pass a double precision float array to any
``S....`` routine which expects single precision arguments, f2py
will create an intermediate array to match the argument types and
overwriting will be performed on that intermediate array.
Similarly, if a C-contiguous array is passed, f2py will pass a
FORTRAN-contiguous array internally. Please make sure that these
details are satisfied. More information can be found in the f2py
documentation.
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
:toctree: generated/
get_lapack_funcs
All functions
-------------
.. autosummary::
:toctree: generated/
sgbsv
dgbsv
cgbsv
zgbsv
sgbtrf
dgbtrf
cgbtrf
zgbtrf
sgbtrs
dgbtrs
cgbtrs
zgbtrs
sgebal
dgebal
cgebal
zgebal
sgecon
dgecon
cgecon
zgecon
sgeequ
dgeequ
cgeequ
zgeequ
sgeequb
dgeequb
cgeequb
zgeequb
sgees
dgees
cgees
zgees
sgeev
dgeev
cgeev
zgeev
sgeev_lwork
dgeev_lwork
cgeev_lwork
zgeev_lwork
sgegv
dgegv
cgegv
zgegv
sgehrd
dgehrd
cgehrd
zgehrd
sgehrd_lwork
dgehrd_lwork
cgehrd_lwork
zgehrd_lwork
sgejsv
dgejsv
sgels
dgels
cgels
zgels
sgels_lwork
dgels_lwork
cgels_lwork
zgels_lwork
sgelsd
dgelsd
cgelsd
zgelsd
sgelsd_lwork
dgelsd_lwork
cgelsd_lwork
zgelsd_lwork
sgelss
dgelss
cgelss
zgelss
sgelss_lwork
dgelss_lwork
cgelss_lwork
zgelss_lwork
sgelsy
dgelsy
cgelsy
zgelsy
sgelsy_lwork
dgelsy_lwork
cgelsy_lwork
zgelsy_lwork
sgeqp3
dgeqp3
cgeqp3
zgeqp3
sgeqrf
dgeqrf
cgeqrf
zgeqrf
sgeqrf_lwork
dgeqrf_lwork
cgeqrf_lwork
zgeqrf_lwork
sgeqrfp
dgeqrfp
cgeqrfp
zgeqrfp
sgeqrfp_lwork
dgeqrfp_lwork
cgeqrfp_lwork
zgeqrfp_lwork
sgerqf
dgerqf
cgerqf
zgerqf
sgesdd
dgesdd
cgesdd
zgesdd
sgesdd_lwork
dgesdd_lwork
cgesdd_lwork
zgesdd_lwork
sgesv
dgesv
cgesv
zgesv
sgesvd
dgesvd
cgesvd
zgesvd
sgesvd_lwork
dgesvd_lwork
cgesvd_lwork
zgesvd_lwork
sgesvx
dgesvx
cgesvx
zgesvx
sgetrf
dgetrf
cgetrf
zgetrf
sgetc2
dgetc2
cgetc2
zgetc2
sgetri
dgetri
cgetri
zgetri
sgetri_lwork
dgetri_lwork
cgetri_lwork
zgetri_lwork
sgetrs
dgetrs
cgetrs
zgetrs
sgesc2
dgesc2
cgesc2
zgesc2
sgges
dgges
cgges
zgges
sggev
dggev
cggev
zggev
sgglse
dgglse
cgglse
zgglse
sgglse_lwork
dgglse_lwork
cgglse_lwork
zgglse_lwork
sgtsv
dgtsv
cgtsv
zgtsv
sgtsvx
dgtsvx
cgtsvx
zgtsvx
chbevd
zhbevd
chbevx
zhbevx
checon
zhecon
cheequb
zheequb
cheev
zheev
cheev_lwork
zheev_lwork
cheevd
zheevd
cheevd_lwork
zheevd_lwork
cheevr
zheevr
cheevr_lwork
zheevr_lwork
cheevx
zheevx
cheevx_lwork
zheevx_lwork
chegst
zhegst
chegv
zhegv
chegv_lwork
zhegv_lwork
chegvd
zhegvd
chegvx
zhegvx
chegvx_lwork
zhegvx_lwork
chesv
zhesv
chesv_lwork
zhesv_lwork
chesvx
zhesvx
chesvx_lwork
zhesvx_lwork
chetrd
zhetrd
chetrd_lwork
zhetrd_lwork
chetrf
zhetrf
chetrf_lwork
zhetrf_lwork
chfrk
zhfrk
slamch
dlamch
slange
dlange
clange
zlange
slarf
dlarf
clarf
zlarf
slarfg
dlarfg
clarfg
zlarfg
slartg
dlartg
clartg
zlartg
slasd4
dlasd4
slaswp
dlaswp
claswp
zlaswp
slauum
dlauum
clauum
zlauum
sorcsd
dorcsd
sorcsd_lwork
dorcsd_lwork
sorghr
dorghr
sorghr_lwork
dorghr_lwork
sorgqr
dorgqr
sorgrq
dorgrq
sormqr
dormqr
sormrz
dormrz
sormrz_lwork
dormrz_lwork
spbsv
dpbsv
cpbsv
zpbsv
spbtrf
dpbtrf
cpbtrf
zpbtrf
spbtrs
dpbtrs
cpbtrs
zpbtrs
spftrf
dpftrf
cpftrf
zpftrf
spftri
dpftri
cpftri
zpftri
spftrs
dpftrs
cpftrs
zpftrs
spocon
dpocon
cpocon
zpocon
spstrf
dpstrf
cpstrf
zpstrf
spstf2
dpstf2
cpstf2
zpstf2
sposv
dposv
cposv
zposv
sposvx
dposvx
cposvx
zposvx
spotrf
dpotrf
cpotrf
zpotrf
spotri
dpotri
cpotri
zpotri
spotrs
dpotrs
cpotrs
zpotrs
sppcon
dppcon
cppcon
zppcon
sppsv
dppsv
cppsv
zppsv
spptrf
dpptrf
cpptrf
zpptrf
spptri
dpptri
cpptri
zpptri
spptrs
dpptrs
cpptrs
zpptrs
sptsv
dptsv
cptsv
zptsv
sptsvx
dptsvx
cptsvx
zptsvx
spttrf
dpttrf
cpttrf
zpttrf
spttrs
dpttrs
cpttrs
zpttrs
spteqr
dpteqr
cpteqr
zpteqr
crot
zrot
ssbev
dsbev
ssbevd
dsbevd
ssbevx
dsbevx
ssfrk
dsfrk
sstebz
dstebz
sstein
dstein
sstemr
dstemr
sstemr_lwork
dstemr_lwork
ssterf
dsterf
sstev
dstev
ssycon
dsycon
csycon
zsycon
ssyconv
dsyconv
csyconv
zsyconv
ssyequb
dsyequb
csyequb
zsyequb
ssyev
dsyev
ssyev_lwork
dsyev_lwork
ssyevd
dsyevd
ssyevd_lwork
dsyevd_lwork
ssyevr
dsyevr
ssyevr_lwork
dsyevr_lwork
ssyevx
dsyevx
ssyevx_lwork
dsyevx_lwork
ssygst
dsygst
ssygv
dsygv
ssygv_lwork
dsygv_lwork
ssygvd
dsygvd
ssygvx
dsygvx
ssygvx_lwork
dsygvx_lwork
ssysv
dsysv
csysv
zsysv
ssysv_lwork
dsysv_lwork
csysv_lwork
zsysv_lwork
ssysvx
dsysvx
csysvx
zsysvx
ssysvx_lwork
dsysvx_lwork
csysvx_lwork
zsysvx_lwork
ssytf2
dsytf2
csytf2
zsytf2
ssytrd
dsytrd
ssytrd_lwork
dsytrd_lwork
ssytrf
dsytrf
csytrf
zsytrf
ssytrf_lwork
dsytrf_lwork
csytrf_lwork
zsytrf_lwork
stbtrs
dtbtrs
ctbtrs
ztbtrs
stfsm
dtfsm
ctfsm
ztfsm
stfttp
dtfttp
ctfttp
ztfttp
stfttr
dtfttr
ctfttr
ztfttr
stgexc
dtgexc
ctgexc
ztgexc
stgsen
dtgsen
ctgsen
ztgsen
stpttf
dtpttf
ctpttf
ztpttf
stpttr
dtpttr
ctpttr
ztpttr
strsyl
dtrsyl
ctrsyl
ztrsyl
strtri
dtrtri
ctrtri
ztrtri
strtrs
dtrtrs
ctrtrs
ztrtrs
strttf
dtrttf
ctrttf
ztrttf
strttp
dtrttp
ctrttp
ztrttp
stzrzf
dtzrzf
ctzrzf
ztzrzf
stzrzf_lwork
dtzrzf_lwork
ctzrzf_lwork
ztzrzf_lwork
cunghr
zunghr
cunghr_lwork
zunghr_lwork
cungqr
zungqr
cungrq
zungrq
cunmqr
zunmqr
sgeqrt
dgeqrt
cgeqrt
zgeqrt
sgemqrt
dgemqrt
cgemqrt
zgemqrt
sgttrf
dgttrf
cgttrf
zgttrf
sgttrs
dgttrs
cgttrs
zgttrs
stpqrt
dtpqrt
ctpqrt
ztpqrt
stpmqrt
dtpmqrt
ctpmqrt
ztpmqrt
cuncsd
zuncsd
cuncsd_lwork
zuncsd_lwork
cunmrz
zunmrz
cunmrz_lwork
zunmrz_lwork
ilaver
"""
#
# Author: Pearu Peterson, March 2002
#
import numpy as _np
from .blas import _get_funcs, _memoize_get_funcs
from scipy.linalg import _flapack
from re import compile as regex_compile
try:
from scipy.linalg import _clapack
except ImportError:
_clapack = None
try:
from scipy.linalg import _flapack_64
HAS_ILP64 = True
except ImportError:
HAS_ILP64 = False
_flapack_64 = None
# Backward compatibility
from scipy._lib._util import DeprecatedImport as _DeprecatedImport
clapack = _DeprecatedImport("scipy.linalg.blas.clapack", "scipy.linalg.lapack")
flapack = _DeprecatedImport("scipy.linalg.blas.flapack", "scipy.linalg.lapack")
# Expose all functions (only flapack --- clapack is an implementation detail)
empty_module = None
from scipy.linalg._flapack import *
del empty_module
__all__ = ['get_lapack_funcs']
# Deprecation shim: LAPACK 3.6.0 removed the ``*gegv`` drivers, so the
# wrappers are kept only with a loud warning until they are dropped.
_dep_message = """The `*gegv` family of routines has been deprecated in
LAPACK 3.6.0 in favor of the `*ggev` family of routines.
The corresponding wrappers will be removed from SciPy in
a future release."""

cgegv = _np.deprecate(cgegv, old_name='cgegv', message=_dep_message)
dgegv = _np.deprecate(dgegv, old_name='dgegv', message=_dep_message)
sgegv = _np.deprecate(sgegv, old_name='sgegv', message=_dep_message)
zgegv = _np.deprecate(zgegv, old_name='zgegv', message=_dep_message)

# Modify _flapack in this scope so the deprecation warnings apply to
# functions returned by get_lapack_funcs.
_flapack.cgegv = cgegv
_flapack.dgegv = dgegv
_flapack.sgegv = sgegv
_flapack.zgegv = zgegv

# some convenience alias for complex functions: requests for the
# non-existent complex ``or...`` names resolve to the unitary ``un...``
# equivalents.
_lapack_alias = {
    'corghr': 'cunghr', 'zorghr': 'zunghr',
    'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork',
    'corgqr': 'cungqr', 'zorgqr': 'zungqr',
    'cormqr': 'cunmqr', 'zormqr': 'zunmqr',
    'corgrq': 'cungrq', 'zorgrq': 'zungrq',
}
# Place guards against docstring rendering issues with special characters.
# p1 matches f2py's "with bounds ... [and ... storage]" annotations and
# p2 matches "Default: ..." lines; both are rewritten with backticks below.
p1 = regex_compile(r'with bounds (?P<b>.*?)( and (?P<s>.*?) storage){0,1}\n')
p2 = regex_compile(r'Default: (?P<d>.*?)\n')
def backtickrepl(m):
    """``re.sub`` callback: wrap the bound spec (and optional storage spec)
    of an f2py-generated docstring line in double backticks.
    """
    bounds = m.group('b')
    storage = m.group('s')
    if storage:
        return 'with bounds ``{}`` with ``{}`` storage\n'.format(bounds, storage)
    return 'with bounds ``{}``\n'.format(bounds)
# Rewrite the auto-generated docstrings of the eigensolver wrappers so the
# bound/storage and default annotations render cleanly.
for routine in [ssyevr, dsyevr, cheevr, zheevr,
                ssyevx, dsyevx, cheevx, zheevx,
                ssygvd, dsygvd, chegvd, zhegvd]:
    if routine.__doc__:
        routine.__doc__ = p1.sub(backtickrepl, routine.__doc__)
        routine.__doc__ = p2.sub('Default ``\\1``\n', routine.__doc__)
    else:
        continue

# The regex helpers are only needed at import time; drop them from the
# module namespace.
del regex_compile, p1, p2, backtickrepl
@_memoize_get_funcs
def get_lapack_funcs(names, arrays=(), dtype=None, ilp64=False):
    """Return available LAPACK function objects from names.

    Arrays are used to determine the optimal prefix of LAPACK routines.

    Parameters
    ----------
    names : str or sequence of str
        Name(s) of LAPACK functions without type prefix.
    arrays : sequence of ndarrays, optional
        Arrays can be given to determine optimal prefix of LAPACK
        routines. If not given, double-precision routines will be
        used, otherwise the most generic type in arrays will be used.
    dtype : str or dtype, optional
        Data-type specifier. Not used if `arrays` is non-empty.
    ilp64 : {True, False, 'preferred'}, optional
        Whether to return ILP64 routine variant.
        Choosing 'preferred' returns ILP64 routine if available, and
        otherwise the 32-bit routine. Default: False

    Returns
    -------
    funcs : list
        List containing the found function(s).

    Notes
    -----
    This routine automatically chooses between Fortran/C interfaces.
    Fortran code is used whenever possible for arrays with column major
    order; in all other cases, C code is preferred.

    In LAPACK, the naming convention is that all functions start with a
    type prefix, one of {'s', 'd', 'c', 'z'} for the NumPy types
    {float32, float64, complex64, complex128} respectively; the prefix is
    stored in the ``typecode`` attribute of the returned functions.

    Examples
    --------
    >>> import scipy.linalg as LA
    >>> a = np.random.rand(3,2)
    >>> x_lange = LA.get_lapack_funcs('lange', (a,))
    >>> x_lange.typecode
    'd'
    >>> x_lange = LA.get_lapack_funcs('lange',(a*1j,))
    >>> x_lange.typecode
    'z'
    """
    # Resolve the 'preferred' sentinel to a concrete boolean first.
    if isinstance(ilp64, str):
        if ilp64 != 'preferred':
            raise ValueError("Invalid value for 'ilp64'")
        ilp64 = HAS_ILP64
    if ilp64:
        if not HAS_ILP64:
            raise RuntimeError("LAPACK ILP64 routine requested, but Scipy "
                               "compiled only with 32-bit BLAS")
        # ILP64 builds ship only the Fortran interface; there is no C
        # counterpart module.
        return _get_funcs(names, arrays, dtype,
                          "LAPACK", _flapack_64, None,
                          "flapack_64", None, _lapack_alias,
                          ilp64=True)
    return _get_funcs(names, arrays, dtype,
                      "LAPACK", _flapack, _clapack,
                      "flapack", "clapack", _lapack_alias,
                      ilp64=False)
# Bounds for validating LWORK values against the integer width LAPACK uses.
_int32_max = _np.iinfo(_np.int32).max
_int64_max = _np.iinfo(_np.int64).max


def _compute_lwork(routine, *args, **kwargs):
    """Round a floating-point LWORK returned by a ``*_lwork`` query to int.

    Several LAPACK routines report the optimal LWORK in a floating-point
    variable. For large values, single precision cannot represent the
    exact integer -- some LAPACK versions (<= 3.5.0 at least) truncate it,
    which can yield a value smaller than required. The conversion is done
    by `_check_work_float`, which compensates for that.

    Examples
    --------
    >>> from scipy.linalg import lapack
    >>> n = 5000
    >>> s_r, s_lw = lapack.get_lapack_funcs(('sysvx', 'sysvx_lwork'))
    >>> lwork = lapack._compute_lwork(s_lw, n)
    >>> lwork
    32000
    """
    dtype = getattr(routine, 'dtype', None)
    int_dtype = getattr(routine, 'int_dtype', None)
    results = routine(*args, **kwargs)
    # The last entry is LAPACK's ``info`` flag; non-zero means failure.
    if results[-1] != 0:
        raise ValueError("Internal work array size computation failed: "
                         "%d" % (results[-1],))
    lwork_values = tuple(_check_work_float(value.real, dtype, int_dtype)
                         for value in results[:-1])
    if len(lwork_values) == 1:
        return lwork_values[0]
    return lwork_values


def _check_work_float(value, dtype, int_dtype):
    """Convert a LAPACK-returned work-array size float to an integer,
    carefully for single-precision types.
    """
    if dtype == _np.float32 or dtype == _np.complex64:
        # Single-precision routine -- step to the next representable
        # float32 to work around possible truncation inside LAPACK.
        value = _np.nextafter(value, _np.inf, dtype=_np.float32)
    lwork = int(value)
    if int_dtype.itemsize == 4 and not (0 <= lwork <= _int32_max):
        raise ValueError("Too large work array required -- computation "
                         "cannot be performed with standard 32-bit"
                         " LAPACK.")
    if int_dtype.itemsize == 8 and not (0 <= lwork <= _int64_max):
        raise ValueError("Too large work array required -- computation"
                         " cannot be performed with standard 64-bit"
                         " LAPACK.")
    return lwork
| 15.802871 | 79 | 0.641274 |
import numpy as _np
from .blas import _get_funcs, _memoize_get_funcs
from scipy.linalg import _flapack
from re import compile as regex_compile
try:
from scipy.linalg import _clapack
except ImportError:
_clapack = None
try:
from scipy.linalg import _flapack_64
HAS_ILP64 = True
except ImportError:
HAS_ILP64 = False
_flapack_64 = None
from scipy._lib._util import DeprecatedImport as _DeprecatedImport
clapack = _DeprecatedImport("scipy.linalg.blas.clapack", "scipy.linalg.lapack")
flapack = _DeprecatedImport("scipy.linalg.blas.flapack", "scipy.linalg.lapack")
empty_module = None
from scipy.linalg._flapack import *
del empty_module
__all__ = ['get_lapack_funcs']
_dep_message = """The `*gegv` family of routines has been deprecated in
LAPACK 3.6.0 in favor of the `*ggev` family of routines.
The corresponding wrappers will be removed from SciPy in
a future release."""
cgegv = _np.deprecate(cgegv, old_name='cgegv', message=_dep_message)
dgegv = _np.deprecate(dgegv, old_name='dgegv', message=_dep_message)
sgegv = _np.deprecate(sgegv, old_name='sgegv', message=_dep_message)
zgegv = _np.deprecate(zgegv, old_name='zgegv', message=_dep_message)
_flapack.cgegv = cgegv
_flapack.dgegv = dgegv
_flapack.sgegv = sgegv
_flapack.zgegv = zgegv
_lapack_alias = {
'corghr': 'cunghr', 'zorghr': 'zunghr',
'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork',
'corgqr': 'cungqr', 'zorgqr': 'zungqr',
'cormqr': 'cunmqr', 'zormqr': 'zunmqr',
'corgrq': 'cungrq', 'zorgrq': 'zungrq',
}
p1 = regex_compile(r'with bounds (?P<b>.*?)( and (?P<s>.*?) storage){0,1}\n')
p2 = regex_compile(r'Default: (?P<d>.*?)\n')
def backtickrepl(m):
if m.group('s'):
return ('with bounds ``{}`` with ``{}`` storage\n'
''.format(m.group('b'), m.group('s')))
else:
return 'with bounds ``{}``\n'.format(m.group('b'))
for routine in [ssyevr, dsyevr, cheevr, zheevr,
ssyevx, dsyevx, cheevx, zheevx,
ssygvd, dsygvd, chegvd, zhegvd]:
if routine.__doc__:
routine.__doc__ = p1.sub(backtickrepl, routine.__doc__)
routine.__doc__ = p2.sub('Default ``\\1``\n', routine.__doc__)
else:
continue
del regex_compile, p1, p2, backtickrepl
@_memoize_get_funcs
def get_lapack_funcs(names, arrays=(), dtype=None, ilp64=False):
if isinstance(ilp64, str):
if ilp64 == 'preferred':
ilp64 = HAS_ILP64
else:
raise ValueError("Invalid value for 'ilp64'")
if not ilp64:
return _get_funcs(names, arrays, dtype,
"LAPACK", _flapack, _clapack,
"flapack", "clapack", _lapack_alias,
ilp64=False)
else:
if not HAS_ILP64:
raise RuntimeError("LAPACK ILP64 routine requested, but Scipy "
"compiled only with 32-bit BLAS")
return _get_funcs(names, arrays, dtype,
"LAPACK", _flapack_64, None,
"flapack_64", None, _lapack_alias,
ilp64=True)
_int32_max = _np.iinfo(_np.int32).max
_int64_max = _np.iinfo(_np.int64).max
def _compute_lwork(routine, *args, **kwargs):
dtype = getattr(routine, 'dtype', None)
int_dtype = getattr(routine, 'int_dtype', None)
ret = routine(*args, **kwargs)
if ret[-1] != 0:
raise ValueError("Internal work array size computation failed: "
"%d" % (ret[-1],))
if len(ret) == 2:
return _check_work_float(ret[0].real, dtype, int_dtype)
else:
return tuple(_check_work_float(x.real, dtype, int_dtype)
for x in ret[:-1])
def _check_work_float(value, dtype, int_dtype):
    """Convert a floating-point lwork estimate to a validated Python int.

    Raises ValueError when the requested size does not fit the integer type
    used by the LAPACK build at hand.
    """
    # Single-precision routines report lwork as float32; bump to the next
    # representable value so the truncation below never under-allocates.
    if dtype in (_np.float32, _np.complex64):
        value = _np.nextafter(value, _np.inf, dtype=_np.float32)
    size = int(value)
    limits = {4: (_int32_max, "32"), 8: (_int64_max, "64")}
    if int_dtype.itemsize in limits:
        limit, bits = limits[int_dtype.itemsize]
        if size < 0 or size > limit:
            raise ValueError("Too large work array required -- computation "
                             "cannot be performed with standard %s-bit "
                             "LAPACK." % bits)
    return size
| true | true |
f7286d5721ce44c652e278ca9b476680524bd3e9 | 4,166 | py | Python | code-sample-python/python_django_prometheus/metrics_app/views.py | aquatir/learntocode | 9b860a528ded64fab2686a93c49dfd4b3947d6c9 | [
"MIT"
] | null | null | null | code-sample-python/python_django_prometheus/metrics_app/views.py | aquatir/learntocode | 9b860a528ded64fab2686a93c49dfd4b3947d6c9 | [
"MIT"
] | null | null | null | code-sample-python/python_django_prometheus/metrics_app/views.py | aquatir/learntocode | 9b860a528ded64fab2686a93c49dfd4b3947d6c9 | [
"MIT"
] | null | null | null | import os
import time
from random import SystemRandom
from django.contrib.auth.models import User, Group
from django.http import HttpResponse
from prometheus_client import Counter, Gauge, Summary, Histogram, Info, Enum
from rest_framework import viewsets, permissions
from metrics_app.models import MyModel
from metrics_app.serializers import UserSerializer, GroupSerializer
# Counts... stuff
COUNTER_CALLS_TOTAL = Counter(
"my_app_calls_total", "Total number of calls to my_app", ["some_label"]
)
COUNTER_EXCEPTIONS = Counter("my_app_exceptions", "Number of exceptions")
# exposes a single value which can be changed in time
# good for measuring in-flight things, e.g. current number of processed tasks
GAUGE_INDEX = Gauge("my_app_random_gauge", "my_app random gauge", ["another_label"])
# create two metrics
# - my_app_summary_count: number of times this was called
# - my_app_summary_sum: total sum of values
SUMMARY_INDEX = Summary("my_app_summary", "my_app summary")
# measure values and create a histogram of buckets. Uses generic RPC bucket by default buckets of:
# my_app_histogram_bucket{le="0.005"} 0.0
# my_app_histogram_bucket{le="0.01"} 0.0
# my_app_histogram_bucket{le="0.025"} 0.0
# my_app_histogram_bucket{le="0.05"} 0.0
# my_app_histogram_bucket{le="0.075"} 0.0
# my_app_histogram_bucket{le="0.1"} 0.0
# my_app_histogram_bucket{le="0.25"} 0.0
# my_app_histogram_bucket{le="0.5"} 0.0
# my_app_histogram_bucket{le="0.75"} 0.0
# my_app_histogram_bucket{le="1.0"} 2.0
# my_app_histogram_bucket{le="2.5"} 5.0
# my_app_histogram_bucket{le="5.0"} 9.0
# my_app_histogram_bucket{le="7.5"} 9.0
# my_app_histogram_bucket{le="10.0"} 9.0
HISTOGRAM_HST = Histogram("my_app_histogram", "my_app_histogram")
# One-time static info.
# Could also NOT be one-time, but... why?
# metric name will have "_info" postfix added to it
INFO = Info("my_app", "generic my_app info")
INFO.info({"app_name": "my_app", "launched_at": str(time.time())})
# Set one state out of many for metric
# Essentially a gauge with one of the possible states
APP_STATES = ["RUNNING", "STOPPED", "IN_PROGRESS"]
ENUM_INDEX = Enum("my_app_current_state", "my app current state", states=APP_STATES)
RND = SystemRandom()
def index(request):
    """Touch every demo metric once and report the worker's PID."""
    calls = COUNTER_CALLS_TOTAL.labels(some_label="some_value")
    calls.inc()
    gauge = GAUGE_INDEX.labels(another_label="value")
    gauge.set(RND.random())
    SUMMARY_INDEX.observe(RND.random())
    ENUM_INDEX.state(RND.choice(APP_STATES))
    return HttpResponse(f"Hello from {os.getpid()}")
def hist(request):
    """Record 100 random samples from [0, 5) into the demo histogram."""
    for _ in range(100):
        HISTOGRAM_HST.observe(RND.random() * 5)
    return HttpResponse("Hello, histogram")
# This is done automatically
@COUNTER_EXCEPTIONS.count_exceptions()
def ex(request):
    """Raise on roughly 1 in 5 requests (NotImplementedError or KeyError,
    50/50); the decorator counts the raised exceptions automatically."""
    if RND.random() <= 0.8:
        return HttpResponse("You're lucky")
    raise NotImplementedError if RND.random() > 0.5 else KeyError
def model(request):
    """Randomly create, delete, or update a MyModel row (one action per call).

    Delete/update are no-ops when the table is empty, reported as
    "No changes".
    """
    roll = RND.random()
    outcome = "No changes"
    if roll > 0.66:
        MyModel.objects.create(text="kekw")
        outcome = "Model created"
    elif roll > 0.33:
        victim = MyModel.objects.first()
        if victim is not None:
            victim.delete()
            outcome = "Model deleted"
    else:
        target = MyModel.objects.last()
        if target is not None:
            target.text = f"updated text {roll}"
            target.save()
            outcome = "Model updated"
    return HttpResponse(outcome)
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    # Newest accounts first; every CRUD action requires authentication.
    queryset = User.objects.all().order_by("-date_joined")
    serializer_class = UserSerializer
    permission_classes = [permissions.IsAuthenticated]
class GroupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows groups to be viewed or edited.
    """
    # Unordered queryset; every CRUD action requires authentication.
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
    permission_classes = [permissions.IsAuthenticated]
| 31.801527 | 98 | 0.705473 | import os
import time
from random import SystemRandom
from django.contrib.auth.models import User, Group
from django.http import HttpResponse
from prometheus_client import Counter, Gauge, Summary, Histogram, Info, Enum
from rest_framework import viewsets, permissions
from metrics_app.models import MyModel
from metrics_app.serializers import UserSerializer, GroupSerializer
COUNTER_CALLS_TOTAL = Counter(
"my_app_calls_total", "Total number of calls to my_app", ["some_label"]
)
COUNTER_EXCEPTIONS = Counter("my_app_exceptions", "Number of exceptions")
GAUGE_INDEX = Gauge("my_app_random_gauge", "my_app random gauge", ["another_label"])
SUMMARY_INDEX = Summary("my_app_summary", "my_app summary")
HISTOGRAM_HST = Histogram("my_app_histogram", "my_app_histogram")
INFO = Info("my_app", "generic my_app info")
INFO.info({"app_name": "my_app", "launched_at": str(time.time())})
APP_STATES = ["RUNNING", "STOPPED", "IN_PROGRESS"]
ENUM_INDEX = Enum("my_app_current_state", "my app current state", states=APP_STATES)
RND = SystemRandom()
def index(request):
COUNTER_CALLS_TOTAL.labels(some_label="some_value").inc()
GAUGE_INDEX.labels(another_label="value").set(RND.random())
SUMMARY_INDEX.observe(RND.random())
ENUM_INDEX.state(RND.choice(APP_STATES))
return HttpResponse(f"Hello from {os.getpid()}")
def hist(request):
for i in range(0, 100):
value = RND.random() * 5
HISTOGRAM_HST.observe(value)
return HttpResponse("Hello, histogram")
@COUNTER_EXCEPTIONS.count_exceptions()
def ex(request):
if RND.random() > 0.8:
if RND.random() > 0.5:
raise NotImplementedError
else:
raise KeyError
return HttpResponse("You're lucky")
def model(request):
rnd = RND.random()
return_text = ""
if rnd > 0.66:
MyModel.objects.create(text="kekw")
return_text = "Model created"
elif rnd > 0.33:
m = MyModel.objects.first()
if m is not None:
m.delete()
return_text = "Model deleted"
else:
m = MyModel.objects.last()
if m is not None:
m.text = f"updated text {rnd}"
m.save()
return_text = "Model updated"
if return_text == "":
return_text = "No changes"
return HttpResponse(return_text)
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all().order_by("-date_joined")
serializer_class = UserSerializer
permission_classes = [permissions.IsAuthenticated]
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
permission_classes = [permissions.IsAuthenticated]
| true | true |
f7286dada8d628be90f9f2661246fdf96eb66692 | 3,465 | py | Python | GBDT_modeling.py | Expert68/hotel_recommendation | a6c1035c7e3ff2d824039855a2349b50f9143d37 | [
"Apache-2.0"
] | 1 | 2019-03-08T13:54:36.000Z | 2019-03-08T13:54:36.000Z | GBDT_modeling.py | Expert68/hotel_recommendation | a6c1035c7e3ff2d824039855a2349b50f9143d37 | [
"Apache-2.0"
] | null | null | null | GBDT_modeling.py | Expert68/hotel_recommendation | a6c1035c7e3ff2d824039855a2349b50f9143d37 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
import h5py
import os
from data_clean import pre_process,get_agg
#------------------------------ Define the evaluation metric ---------------------------
def map5eval(preds, dtrain):
    """Custom xgboost evaluation metric: negative MAP@5.

    Parameters
    ----------
    preds : ndarray, shape (n_rows, n_classes)
        Predicted class scores (assumes at least 5 classes).
    dtrain : xgboost.DMatrix
        Provides the true class label per row via ``get_label()``.

    Returns
    -------
    (str, float)
        Metric name and ``-MAP@5`` (negated so that smaller is better,
        matching xgboost's minimization convention).
    """
    actual = dtrain.get_label()
    # BUGFIX: the original sliced with `[:-np.arange(5)]`, which is invalid
    # (an array cannot be a slice bound) and compared the wrong axis.
    # Columns -1..-5 of the ascending argsort are the top-5 classes,
    # highest-scoring first.
    predicted = preds.argsort(axis=1)[:, -np.arange(1, 6)]
    metric = 0.0
    for i in range(5):
        # 1/(i+1) credit when the true label sits at rank i.
        metric += np.sum(actual == predicted[:, i]) / (i + 1)
    metric /= actual.shape[0]
    return 'map5', -metric
#------------------------------ Train the model -----------------------------------
# One multi-class booster per run; each run consumes a fresh 1M-row slice of
# train.csv (tracked via rows_complete.txt) and accumulates probabilities.
clf = xgb.XGBClassifier(objective='multi:softmax', max_depth=5, n_estimators=300,
                        learning_rate=0.01, nthread=4, subsample=0.7,
                        colsample_bytree=0.7, min_child_weight=3, silent=False)
destinations = pd.read_csv('input/destinations.csv')
result = pd.read_csv('input/sample_result.csv')
agg1 = pd.read_csv('output/srch_dest_hc_hm_agg.csv')
# Resume point: how many rows of train.csv earlier runs already consumed.
if os.path.exists('rows_complete.txt'):
    with open('rows_complete.txt', 'r') as f:
        skipsize = int(f.readline())
else:
    skipsize = 0
# Skip rows 1..skipsize-1 but always keep row 0 (the header).
skip = 0 if skipsize == 0 else range(1, skipsize)
tchunksize = 1000000
print('%d rows will be skipped and next %d rows will be used for training' % (skipsize, tchunksize))
train = pd.read_csv('input/train.csv', parse_dates=['date_time', 'srch_ci', 'srch_co'],
                    skiprows=skip, nrows=tchunksize)
train = train[train.is_booking == 1]  # train only on actual bookings
train = pd.merge(train, destinations, how='left', on='srch_destination_id')
train = pd.merge(train, agg1, how='left', on=['srch_destination_id', 'hotel_country', 'hotel_market'])
pre_process(train)
y = train.hotel_cluster
train.drop(['cnt', 'hotel_cluster', 'is_booking'], axis=1, inplace=True)
X_train, X_test, y_train, y_test = train_test_split(train, y, stratify=y, test_size=0.2)
clf.fit(X_train, y_train, early_stopping_rounds=50, eval_metric=map5eval,
        eval_set=[(X_train, y_train), (X_test, y_test)])
# ------------------------------ Predict on the test data ------------------------------
count = 0
chunksize = 10000
preds = np.empty((result.shape[0], clf.n_classes_))
reader = pd.read_csv('input/test.csv', parse_dates=['date_time', 'srch_ci', 'srch_co'],
                     chunksize=chunksize)
for chunk in reader:
    chunk = pd.merge(chunk, destinations, how='left', on='srch_destination_id')
    chunk = pd.merge(chunk, agg1, how='left', on=['srch_destination_id', 'hotel_country', 'hotel_market'])
    chunk.drop(['id'], axis=1, inplace=True)
    pre_process(chunk)
    pred = clf.predict_proba(chunk)
    preds[count:(count + chunk.shape[0]), :] = pred
    # BUGFIX: advance by the actual chunk length -- the final chunk may be
    # shorter than `chunksize`, which previously made the progress drift.
    count = count + chunk.shape[0]
    print('%d rows completed' % count)
del clf
del agg1
# Accumulate probabilities from this run with those of previous runs.
if os.path.exists('output/probs/allpreds_xgb.h5'):
    with h5py.File('output/probs/allpreds_xgb.h5', 'r+') as hf:
        print('reading in and combining probabilities')
        predshf = hf['preds']
        preds += predshf.value  # NOTE: .value is deprecated in modern h5py; predshf[()] is the replacement
        print('writing latest probabilities to file')
        predshf[...] = preds
else:
    # BUGFIX: write to the same path that is checked/read above; the original
    # wrote to '../output/...', so accumulated probabilities were never found
    # on subsequent runs.
    with h5py.File('output/probs/allpreds_xgb.h5', 'w') as hf:
        print('writing latest probabilities to file')
        hf.create_dataset('preds', data=preds)
print('generating submission')
col_ind = np.argsort(-preds, axis=1)[:, :5]  # top-5 hotel clusters per row, best first
hc = [' '.join(row.astype(str)) for row in col_ind]
sub = pd.DataFrame(data=hc, index=result.id)
sub.reset_index(inplace=True)
sub.columns = result.columns
sub.to_csv('output/pred_sub.csv', index=False)
# Persist the new resume point so the next run skips the rows consumed here.
# (BUGFIX: dataset-export junk was fused onto the final line, breaking syntax.)
with open('rows_complete.txt', 'w') as f:
    f.write(str(skipsize))
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
import h5py
import os
from data_clean import pre_process,get_agg
def map5eval(preds,dtrain):
actual = dtrain.get_label()
predicted = preds.argsort(axis=1)[:-np.arange(5)]
metric = 0
for i in range(5):
metric += np.sum(actual==predicted[:i])/(i+1)
metric /= actual.shape[0]
return 'map5',-metric
clf = xgb.XGBClassifier(objective='multi:softmax',max_depth=5,n_estimators=300,learning_rate=0.01,nthread=4,subsample=0.7,colsample_bytree=0.7,min_child_weight=3,silent=False)
destinations = pd.read_csv('input/destinations.csv')
result = pd.read_csv('input/sample_result.csv')
agg1 = pd.read_csv('output/srch_dest_hc_hm_agg.csv')
if os.path.exists('rows_complete.txt'):
with open('rows_complete.txt','r') as f:
skipsize = int(f.readline())
else:
skipsize = 0
skip = 0 if skipsize==0 else range(1,skipsize)
tchunksize = 1000000
print('%d rows will be skipped and next %d rows will be used for training' % (skipsize, tchunksize))
train = pd.read_csv('input/train.csv', parse_dates=['date_time', 'srch_ci', 'srch_co'], skiprows=skip, nrows=tchunksize)
train = train[train.is_booking==1]
train = pd.merge(train, destinations, how='left', on='srch_destination_id')
train = pd.merge(train, agg1, how='left', on=['srch_destination_id','hotel_country','hotel_market'])
pre_process(train)
y = train.hotel_cluster
train.drop(['cnt', 'hotel_cluster', 'is_booking'], axis=1, inplace=True)
X_train, X_test, y_train, y_test = train_test_split(train, y, stratify=y, test_size=0.2)
clf.fit(X_train, y_train, early_stopping_rounds=50, eval_metric=map5eval, eval_set=[(X_train, y_train),(X_test, y_test)])
count = 0
chunksize = 10000
preds = np.empty((result.shape[0],clf.n_classes_))
reader = pd.read_csv('input/test.csv', parse_dates=['date_time', 'srch_ci', 'srch_co'], chunksize=chunksize)
for chunk in reader:
chunk = pd.merge(chunk, destinations, how='left', on='srch_destination_id')
chunk = pd.merge(chunk, agg1, how='left', on=['srch_destination_id', 'hotel_country', 'hotel_market'])
chunk.drop(['id'], axis=1, inplace=True)
pre_process(chunk)
pred = clf.predict_proba(chunk)
preds[count:(count + chunk.shape[0]), :] = pred
count = count + chunksize
print('%d rows completed' % count)
del clf
del agg1
if os.path.exists('output/probs/allpreds_xgb.h5'):
with h5py.File('output/probs/allpreds_xgb.h5', 'r+') as hf:
print('reading in and combining probabilities')
predshf = hf['preds']
preds += predshf.value
print('writing latest probabilities to file')
predshf[...] = preds
else:
with h5py.File('../output/probs/allpreds_xgb.h5', 'w') as hf:
print('writing latest probabilities to file')
hf.create_dataset('preds', data=preds)
print('generating submission')
col_ind = np.argsort(-preds, axis=1)[:,:5]
hc = [' '.join(row.astype(str)) for row in col_ind]
sub = pd.DataFrame(data=hc, index=result.id)
sub.reset_index(inplace=True)
sub.columns = result.columns
sub.to_csv('output/pred_sub.csv', index=False)
skipsize += tchunksize
with open('rows_complete.txt', 'w') as f:
f.write(str(skipsize)) | true | true |
f7286fee62b804e76ae5e3b2c1c9de15c8e30622 | 3,640 | py | Python | research/attention_ocr/python/demo_inference.py | vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | [
"Apache-2.0"
] | 1 | 2019-09-11T09:41:11.000Z | 2019-09-11T09:41:11.000Z | research/attention_ocr/python/demo_inference.py | vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | [
"Apache-2.0"
] | null | null | null | research/attention_ocr/python/demo_inference.py | vincentcheny/models | afb1a59fc1bc792ac72d1a3e22e2469020529788 | [
"Apache-2.0"
] | null | null | null | """A script to run inference on a set of image files.
NOTE #1: The Attention OCR model was trained only using FSNS train dataset and
it will work only for images which look more or less similar to french street
names. In order to apply it to images from a different distribution you need
to retrain (or at least fine-tune) it using images from that distribution.
NOTE #2: This script exists for demo purposes only. It is highly recommended
to use tools and mechanisms provided by the TensorFlow Serving system to run
inference on TensorFlow models in production:
https://www.tensorflow.org/serving/serving_basic
Usage:
python demo_inference.py --batch_size=32 \
--checkpoint=model.ckpt-399731\
--image_path_pattern=./datasets/data/fsns/temp/fsns_train_%02d.png
"""
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.python.platform import flags
from tensorflow.python.training import monitored_session
import common_flags
import datasets
import data_provider
FLAGS = flags.FLAGS
# Register the command-line flags shared by the attention-OCR tools.
common_flags.define()
# e.g. ./datasets/data/fsns/temp/fsns_train_%02d.png
flags.DEFINE_string('image_path_pattern', '',
                    'A file pattern with a placeholder for the image index.')
def get_dataset_image_size(dataset_name):
  """Return (width, height) of input images for the named dataset.

  Ideally this would be exposed through the dataset interface itself, but it
  is currently only available via each dataset module's DEFAULT_CONFIG.
  """
  config = getattr(datasets, dataset_name).DEFAULT_CONFIG
  height, width, _channels = config['image_shape']
  return width, height
def load_images(file_pattern, batch_size, dataset_name):
  """Read `batch_size` images into a single uint8 NHWC batch.

  Args:
    file_pattern: printf-style pattern with one integer placeholder; indices
      0..batch_size-1 are substituted to form the file paths.
    batch_size: number of images to read.
    dataset_name: dataset module name used to look up the expected size.

  Returns:
    A numpy array of shape (batch_size, height, width, 3), dtype uint8.
  """
  width, height = get_dataset_image_size(dataset_name)
  batch = np.ndarray(shape=(batch_size, height, width, 3), dtype='uint8')
  for index in range(batch_size):
    path = file_pattern % index
    print("Reading %s" % path)
    pil_image = PIL.Image.open(tf.gfile.GFile(path))
    batch[index, ...] = np.asarray(pil_image)
  return batch
def create_model(batch_size, dataset_name):
  """Build the inference graph for the given dataset.

  Returns the uint8 image placeholder to feed and the model endpoints
  (including `predicted_text`) to fetch.
  """
  width, height = get_dataset_image_size(dataset_name)
  dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
  model = common_flags.create_model(
      num_char_classes=dataset.num_char_classes,
      seq_length=dataset.max_sequence_length,
      num_views=dataset.num_of_views,
      null_code=dataset.null_code,
      charset=dataset.charset)
  raw_images = tf.placeholder(tf.uint8, shape=[batch_size, height, width, 3])
  # Normalize each image with the same preprocessing used during training.
  images = tf.map_fn(data_provider.preprocess_image, raw_images,
                     dtype=tf.float32)
  endpoints = model.create_base(images, labels_one_hot=None)
  return raw_images, endpoints
def run(checkpoint, batch_size, dataset_name, image_path_pattern):
  """Restore `checkpoint` and run text prediction on a batch of images.

  Returns the predicted strings as a list (one entry per input image).
  """
  images_placeholder, endpoints = create_model(batch_size,
                                               dataset_name)
  images_data = load_images(image_path_pattern, batch_size,
                            dataset_name)
  # ChiefSessionCreator restores the model variables from the checkpoint.
  session_creator = monitored_session.ChiefSessionCreator(
    checkpoint_filename_with_path=checkpoint)
  with monitored_session.MonitoredSession(
      session_creator=session_creator) as sess:
    predictions = sess.run(endpoints.predicted_text,
                           feed_dict={images_placeholder: images_data})
  return predictions.tolist()
def main(_):
  """Entry point: run inference using flag settings and print each string."""
  print("Predicted strings:")
  for prediction in run(FLAGS.checkpoint, FLAGS.batch_size,
                        FLAGS.dataset_name, FLAGS.image_path_pattern):
    print(prediction)
# Parse command-line flags and dispatch to main() when run as a script.
if __name__ == '__main__':
  tf.app.run()
| 37.525773 | 79 | 0.721978 | import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.python.platform import flags
from tensorflow.python.training import monitored_session
import common_flags
import datasets
import data_provider
FLAGS = flags.FLAGS
common_flags.define()
flags.DEFINE_string('image_path_pattern', '',
'A file pattern with a placeholder for the image index.')
def get_dataset_image_size(dataset_name):
ds_module = getattr(datasets, dataset_name)
height, width, _ = ds_module.DEFAULT_CONFIG['image_shape']
return width, height
def load_images(file_pattern, batch_size, dataset_name):
width, height = get_dataset_image_size(dataset_name)
images_actual_data = np.ndarray(shape=(batch_size, height, width, 3),
dtype='uint8')
for i in range(batch_size):
path = file_pattern % i
print("Reading %s" % path)
pil_image = PIL.Image.open(tf.gfile.GFile(path))
images_actual_data[i, ...] = np.asarray(pil_image)
return images_actual_data
def create_model(batch_size, dataset_name):
width, height = get_dataset_image_size(dataset_name)
dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
model = common_flags.create_model(
num_char_classes=dataset.num_char_classes,
seq_length=dataset.max_sequence_length,
num_views=dataset.num_of_views,
null_code=dataset.null_code,
charset=dataset.charset)
raw_images = tf.placeholder(tf.uint8, shape=[batch_size, height, width, 3])
images = tf.map_fn(data_provider.preprocess_image, raw_images,
dtype=tf.float32)
endpoints = model.create_base(images, labels_one_hot=None)
return raw_images, endpoints
def run(checkpoint, batch_size, dataset_name, image_path_pattern):
images_placeholder, endpoints = create_model(batch_size,
dataset_name)
images_data = load_images(image_path_pattern, batch_size,
dataset_name)
session_creator = monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint)
with monitored_session.MonitoredSession(
session_creator=session_creator) as sess:
predictions = sess.run(endpoints.predicted_text,
feed_dict={images_placeholder: images_data})
return predictions.tolist()
def main(_):
print("Predicted strings:")
predictions = run(FLAGS.checkpoint, FLAGS.batch_size, FLAGS.dataset_name,
FLAGS.image_path_pattern)
for line in predictions:
print(line)
if __name__ == '__main__':
tf.app.run()
| true | true |
f7287025fb43af465e1046aa25afa2d4b26bb484 | 537 | py | Python | scrape_ims/scrapy/scrape_dogs/scrape_dogs/items.py | nateGeorge/IDmyDog-udacity-submission | d247f03455ea4e72898fe80a6bf577e9c1cedc6d | [
"MIT"
] | null | null | null | scrape_ims/scrapy/scrape_dogs/scrape_dogs/items.py | nateGeorge/IDmyDog-udacity-submission | d247f03455ea4e72898fe80a6bf577e9c1cedc6d | [
"MIT"
] | null | null | null | scrape_ims/scrapy/scrape_dogs/scrape_dogs/items.py | nateGeorge/IDmyDog-udacity-submission | d247f03455ea4e72898fe80a6bf577e9c1cedc6d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class AKCItem(scrapy.Item):
    """Scraped fields for a dog breed (AKC source, per the class name).

    image_urls/images match Scrapy's ImagesPipeline naming convention --
    presumably the pipeline downloads image_urls and fills images; confirm
    the pipeline is enabled in the project settings.
    """
    image_urls = scrapy.Field()
    images = scrapy.Field()
    breed = scrapy.Field()
    link = scrapy.Field()
    desc = scrapy.Field()
    thumb = scrapy.Field()
class WikiItem(scrapy.Item):
    """Scraped fields for a dog breed (Wikipedia source, per the class name).

    image_urls/images match Scrapy's ImagesPipeline naming convention --
    presumably the pipeline downloads image_urls and fills images; confirm
    the pipeline is enabled in the project settings.
    """
    image_urls = scrapy.Field()
    images = scrapy.Field()
    breed = scrapy.Field()
    link = scrapy.Field()
    # BUGFIX: dataset-export junk (" | 23.347826 | ...") was fused onto this
    # line, making the module unparseable; restored to a plain field.
    desc = scrapy.Field()
import scrapy
class AKCItem(scrapy.Item):
image_urls = scrapy.Field()
images = scrapy.Field()
breed = scrapy.Field()
link = scrapy.Field()
desc = scrapy.Field()
thumb = scrapy.Field()
class WikiItem(scrapy.Item):
image_urls = scrapy.Field()
images = scrapy.Field()
breed = scrapy.Field()
link = scrapy.Field()
desc = scrapy.Field() | true | true |
f7287099910ac5f81a31174981d45c2f9db6131d | 626 | py | Python | migrations/versions/4432129ea292_.py | Brenda-M/food-booth | 8fbdb6be12653d18cd74b464af017e83f16a6feb | [
"MIT"
] | null | null | null | migrations/versions/4432129ea292_.py | Brenda-M/food-booth | 8fbdb6be12653d18cd74b464af017e83f16a6feb | [
"MIT"
] | null | null | null | migrations/versions/4432129ea292_.py | Brenda-M/food-booth | 8fbdb6be12653d18cd74b464af017e83f16a6feb | [
"MIT"
] | null | null | null | """empty message
Revision ID: 4432129ea292
Revises:
Create Date: 2020-05-13 18:27:44.141674
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4432129ea292'  # identifier of this migration
down_revision = None       # no parent: this is the first revision
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add a nullable integer 'price' column to 'menus'."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('menus', sa.Column('price', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the 'price' column from 'menus'."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('menus', 'price')
    # ### end Alembic commands ###
| 21.586207 | 75 | 0.680511 | from alembic import op
import sqlalchemy as sa
revision = '4432129ea292'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
| true | true |
f728714125957024001924ac6e16f83aff66e02c | 12,835 | py | Python | AutomatedTesting/Gem/PythonTests/editor/EditorScripts/AssetPicker_UI_UX.py | sandeel31/o3de | db88812d61eef77c6f4451b7f8c7605d6db07412 | [
"Apache-2.0",
"MIT"
] | 1 | 2022-03-12T14:13:45.000Z | 2022-03-12T14:13:45.000Z | AutomatedTesting/Gem/PythonTests/editor/EditorScripts/AssetPicker_UI_UX.py | sandeel31/o3de | db88812d61eef77c6f4451b7f8c7605d6db07412 | [
"Apache-2.0",
"MIT"
] | 2 | 2022-01-13T04:29:38.000Z | 2022-03-12T01:05:31.000Z | AutomatedTesting/Gem/PythonTests/editor/EditorScripts/AssetPicker_UI_UX.py | sandeel31/o3de | db88812d61eef77c6f4451b7f8c7605d6db07412 | [
"Apache-2.0",
"MIT"
] | null | null | null | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
"""
C13751579: Asset Picker UI/UX
"""
import os
import sys
from PySide2 import QtWidgets, QtTest, QtCore
from PySide2.QtCore import Qt
import azlmbr.asset as asset
import azlmbr.bus as bus
import azlmbr.legacy.general as general
import azlmbr.paths
import azlmbr.math as math
sys.path.append(os.path.join(azlmbr.paths.devroot, 'AutomatedTesting', 'Gem', 'PythonTests'))
import editor_python_test_tools.hydra_editor_utils as hydra
import editor_python_test_tools.pyside_utils as pyside_utils
from editor_python_test_tools.editor_test_helper import EditorTestHelper
class AssetPickerUIUXTest(EditorTestHelper):
    """Editor UI test: drives the Asset Picker dialog for a Mesh component and
    verifies tree expand/collapse, scroll bar, filtering, move/resize, and
    asset assignment via both the OK button and the Enter key."""
    def __init__(self):
        EditorTestHelper.__init__(self, log_prefix="AssetPicker_UI_UX", args=["level"])
    @pyside_utils.wrap_async
    async def run_test(self):
        """
        Summary:
        Verify the functionality of Asset Picker and UI/UX properties
        Expected Behavior:
        The asset picker opens and is labeled appropriately ("Pick Model Asset" in this instance).
        The Asset Picker window can be resized and moved around the screen.
        The file tree expands/retracts appropriately and a scroll bar is present when the menus extend
        beyond the length of the window.
        The assets are limited to a valid type for the field selected (mesh assets in this instance)
        The asset picker is closed and the selected asset is assigned to the mesh component.
        Test Steps:
        1) Open a new level
        2) Create entity and add Mesh component
        3) Access Entity Inspector
        4) Click Asset Picker (Mesh Asset)
        a) Collapse all the files initially and verify if scroll bar is not visible
        b) Expand/Verify Top folder of file path
        c) Expand/Verify Nested folder of file path
        d) Verify if the ScrollBar appears after expanding folders
        e) Collapse Nested and Top Level folders and verify if collapsed
        f) Verify if the correct files are appearing in the Asset Picker
        g) Move the widget and verify position
        h) Resize the widget
        i) Assign Mesh asset
        5) Verify if Mesh Asset is assigned via both OK/Enter options
        Note:
        - This test file must be called from the Lumberyard Editor command terminal
        - Any passed and failed tests are written to the Editor.log file.
        Parsing the file or running a log_monitor are required to observe the test results.
        :return: None
        """
        self.file_path = ["AutomatedTesting", "Assets", "Objects", "Foliage"]
        self.incorrect_file_found = False
        self.mesh_asset = "cedar.azmodel"
        self.prefix = ""
        def is_asset_assigned(component, interaction_option):
            # Compare the component's current asset id against the id the
            # catalog reports for the cedar model; both compared as strings.
            path = os.path.join("assets", "objects", "foliage", "cedar.azmodel")
            expected_asset_id = asset.AssetCatalogRequestBus(bus.Broadcast, 'GetAssetIdByPath', path, math.Uuid(),
                                                             False)
            result = hydra.get_component_property_value(component, "Controller|Configuration|Mesh Asset")
            expected_asset_str = expected_asset_id.invoke("ToString")
            result_str = result.invoke("ToString")
            print(f"Asset assigned for {interaction_option} option: {expected_asset_str == result_str}")
            return expected_asset_str == result_str
        def move_and_resize_widget(widget):
            # Move the widget and verify position
            initial_position = widget.pos()
            x, y = initial_position.x() + 5, initial_position.y() + 5
            widget.move(x, y)
            curr_position = widget.pos()
            move_success = curr_position.x() == x and curr_position.y() == y
            self.test_success = move_success and self.test_success
            self.log(f"Widget Move Test: {move_success}")
            # Resize the widget and verify size
            width, height = (
                widget.geometry().width() + 10,
                widget.geometry().height() + 10,
            )
            widget.resize(width, height)
            resize_success = widget.geometry().width() == width and widget.geometry().height() == height
            self.test_success = resize_success and self.test_success
            self.log(f"Widget Resize Test: {resize_success}")
        def verify_files_appeared(model, allowed_asset_extensions, parent_index=QtCore.QModelIndex()):
            # Breadth-first walk of the tree model; flag any entry that looks
            # like a file (has a dot, is not a "(...)" product entry) with an
            # extension outside the allowed set, then stop.
            indices = [parent_index]
            while len(indices) > 0:
                parent_index = indices.pop(0)
                for row in range(model.rowCount(parent_index)):
                    cur_index = model.index(row, 0, parent_index)
                    cur_data = cur_index.data(Qt.DisplayRole)
                    if (
                        "." in cur_data
                        and (cur_data.lower().split(".")[-1] not in allowed_asset_extensions)
                        and not cur_data[-1] == ")"
                    ):
                        print(f"Incorrect file found: {cur_data}")
                        self.incorrect_file_found = True
                        indices = list()
                        break
                    indices.append(cur_index)
            self.test_success = not self.incorrect_file_found and self.test_success
        def print_message_prefix(message):
            print(f"{self.prefix}: {message}")
        async def asset_picker(prefix, allowed_asset_extensions, asset, interaction_option):
            # Drives the modal Asset Picker: tree checks, move/resize, then
            # selects `asset` and confirms via OK button or Enter key.
            active_modal_widget = await pyside_utils.wait_for_modal_widget()
            if active_modal_widget and self.prefix == "":
                self.prefix = prefix
                dialog = active_modal_widget.findChildren(QtWidgets.QDialog, "AssetPickerDialogClass")[0]
                print_message_prefix(f"Asset Picker title for Mesh: {dialog.windowTitle()}")
                tree = dialog.findChildren(QtWidgets.QTreeView, "m_assetBrowserTreeViewWidget")[0]
                scroll_area = tree.findChild(QtWidgets.QWidget, "qt_scrollarea_vcontainer")
                scroll_bar = scroll_area.findChild(QtWidgets.QScrollBar)
                # a) Collapse all the files initially and verify if scroll bar is not visible
                tree.collapseAll()
                await pyside_utils.wait_for_condition(lambda: not scroll_bar.isVisible(), 0.5)
                print_message_prefix(
                    f"Scroll Bar is not visible before expanding the tree: {not scroll_bar.isVisible()}"
                )
                # Get Model Index of the file paths
                model_index_1 = pyside_utils.find_child_by_pattern(tree, self.file_path[0])
                print(model_index_1.model())
                model_index_2 = pyside_utils.find_child_by_pattern(model_index_1, self.file_path[1])
                # b) Expand/Verify Top folder of file path
                print_message_prefix(f"Top level folder initially collapsed: {not tree.isExpanded(model_index_1)}")
                tree.expand(model_index_1)
                print_message_prefix(f"Top level folder expanded: {tree.isExpanded(model_index_1)}")
                # c) Expand/Verify Nested folder of file path
                print_message_prefix(f"Nested folder initially collapsed: {not tree.isExpanded(model_index_2)}")
                tree.expand(model_index_2)
                print_message_prefix(f"Nested folder expanded: {tree.isExpanded(model_index_2)}")
                # d) Verify if the ScrollBar appears after expanding folders
                tree.expandAll()
                await pyside_utils.wait_for_condition(lambda: scroll_bar.isVisible(), 0.5)
                print_message_prefix(f"Scroll Bar appeared after expanding tree: {scroll_bar.isVisible()}")
                # e) Collapse Nested and Top Level folders and verify if collapsed
                tree.collapse(model_index_2)
                print_message_prefix(f"Nested folder collapsed: {not tree.isExpanded(model_index_2)}")
                tree.collapse(model_index_1)
                print_message_prefix(f"Top level folder collapsed: {not tree.isExpanded(model_index_1)}")
                # f) Verify if the correct files are appearing in the Asset Picker
                verify_files_appeared(tree.model(), allowed_asset_extensions)
                print_message_prefix(f"Expected Assets populated in the file picker: {not self.incorrect_file_found}")
                # While we are here we can also check if we can resize and move the widget
                move_and_resize_widget(active_modal_widget)
                # g) Assign asset
                tree.collapseAll()
                await pyside_utils.wait_for_condition(
                    lambda: len(dialog.findChildren(QtWidgets.QFrame, "m_searchWidget")) > 0, 0.5)
                search_widget = dialog.findChildren(QtWidgets.QFrame, "m_searchWidget")[0]
                search_line_edit = search_widget.findChildren(QtWidgets.QLineEdit, "textSearch")[0]
                search_line_edit.setText(asset)
                tree.expandAll()
                asset_model_index = pyside_utils.find_child_by_pattern(tree, asset)
                await pyside_utils.wait_for_condition(lambda: asset_model_index.isValid(), 2.0)
                tree.expand(asset_model_index)
                tree.setCurrentIndex(asset_model_index)
                if interaction_option == "ok":
                    button_box = dialog.findChild(QtWidgets.QDialogButtonBox, "m_buttonBox")
                    ok_button = button_box.button(QtWidgets.QDialogButtonBox.Ok)
                    await pyside_utils.click_button_async(ok_button)
                elif interaction_option == "enter":
                    QtTest.QTest.keyClick(tree, Qt.Key_Enter, Qt.NoModifier)
                self.prefix = ""
        # 1) Open a new level
        self.test_success = self.create_level(
            self.args["level"],
            heightmap_resolution=1024,
            heightmap_meters_per_pixel=1,
            terrain_texture_resolution=4096,
            use_terrain=False,
        )
        # 2) Create entity and add Mesh component
        entity_position = math.Vector3(125.0, 136.0, 32.0)
        entity = hydra.Entity("TestEntity")
        entity.create_entity(entity_position, ["Mesh"])
        # 3) Access Entity Inspector
        editor_window = pyside_utils.get_editor_main_window()
        entity_inspector = editor_window.findChild(QtWidgets.QDockWidget, "Entity Inspector")
        component_list_widget = entity_inspector.findChild(QtWidgets.QWidget, "m_componentListContents")
        # 4) Click on Asset Picker (Mesh Asset)
        general.select_object("TestEntity")
        general.idle_wait(0.5)
        mesh_asset = component_list_widget.findChildren(QtWidgets.QFrame, "Mesh Asset")[0]
        attached_button = mesh_asset.findChildren(QtWidgets.QPushButton, "attached-button")[0]
        # Assign Mesh Asset via OK button
        # NOTE(review): the click coroutine is deliberately not awaited --
        # presumably the click opens a modal dialog that would block until
        # closed, and asset_picker() below drives it; confirm intent.
        pyside_utils.click_button_async(attached_button)
        await asset_picker("Mesh Asset", ["azmodel", "fbx"], "cedar (ModelAsset)", "ok")
        # 5) Verify if Mesh Asset is assigned
        try:
            mesh_success = await pyside_utils.wait_for_condition(lambda: is_asset_assigned(entity.components[0],
                                                                                           "ok"))
        except pyside_utils.EventLoopTimeoutException as err:
            print(err)
            mesh_success = False
        self.test_success = mesh_success and self.test_success
        # Clear Mesh Asset
        hydra.get_set_test(entity, 0, "Controller|Configuration|Mesh Asset", None)
        general.select_object("TestEntity")
        general.idle_wait(0.5)
        mesh_asset = component_list_widget.findChildren(QtWidgets.QFrame, "Mesh Asset")[0]
        attached_button = mesh_asset.findChildren(QtWidgets.QPushButton, "attached-button")[0]
        # Assign Mesh Asset via Enter
        pyside_utils.click_button_async(attached_button)
        await asset_picker("Mesh Asset", ["azmodel", "fbx"], "cedar (ModelAsset)", "enter")
        # 5) Verify if Mesh Asset is assigned
        try:
            mesh_success = await pyside_utils.wait_for_condition(lambda: is_asset_assigned(entity.components[0],
                                                                                           "enter"))
        except pyside_utils.EventLoopTimeoutException as err:
            print(err)
            mesh_success = False
        self.test_success = mesh_success and self.test_success
# Instantiate and execute the test when this script is run inside the Editor.
test = AssetPickerUIUXTest()
test.run()
| 49.555985 | 118 | 0.634671 |
import os
import sys
from PySide2 import QtWidgets, QtTest, QtCore
from PySide2.QtCore import Qt
import azlmbr.asset as asset
import azlmbr.bus as bus
import azlmbr.legacy.general as general
import azlmbr.paths
import azlmbr.math as math
sys.path.append(os.path.join(azlmbr.paths.devroot, 'AutomatedTesting', 'Gem', 'PythonTests'))
import editor_python_test_tools.hydra_editor_utils as hydra
import editor_python_test_tools.pyside_utils as pyside_utils
from editor_python_test_tools.editor_test_helper import EditorTestHelper
class AssetPickerUIUXTest(EditorTestHelper):
def __init__(self):
EditorTestHelper.__init__(self, log_prefix="AssetPicker_UI_UX", args=["level"])
@pyside_utils.wrap_async
async def run_test(self):
self.file_path = ["AutomatedTesting", "Assets", "Objects", "Foliage"]
self.incorrect_file_found = False
self.mesh_asset = "cedar.azmodel"
self.prefix = ""
def is_asset_assigned(component, interaction_option):
path = os.path.join("assets", "objects", "foliage", "cedar.azmodel")
expected_asset_id = asset.AssetCatalogRequestBus(bus.Broadcast, 'GetAssetIdByPath', path, math.Uuid(),
False)
result = hydra.get_component_property_value(component, "Controller|Configuration|Mesh Asset")
expected_asset_str = expected_asset_id.invoke("ToString")
result_str = result.invoke("ToString")
print(f"Asset assigned for {interaction_option} option: {expected_asset_str == result_str}")
return expected_asset_str == result_str
def move_and_resize_widget(widget):
initial_position = widget.pos()
x, y = initial_position.x() + 5, initial_position.y() + 5
widget.move(x, y)
curr_position = widget.pos()
move_success = curr_position.x() == x and curr_position.y() == y
self.test_success = move_success and self.test_success
self.log(f"Widget Move Test: {move_success}")
width, height = (
widget.geometry().width() + 10,
widget.geometry().height() + 10,
)
widget.resize(width, height)
resize_success = widget.geometry().width() == width and widget.geometry().height() == height
self.test_success = resize_success and self.test_success
self.log(f"Widget Resize Test: {resize_success}")
def verify_files_appeared(model, allowed_asset_extensions, parent_index=QtCore.QModelIndex()):
indices = [parent_index]
while len(indices) > 0:
parent_index = indices.pop(0)
for row in range(model.rowCount(parent_index)):
cur_index = model.index(row, 0, parent_index)
cur_data = cur_index.data(Qt.DisplayRole)
if (
"." in cur_data
and (cur_data.lower().split(".")[-1] not in allowed_asset_extensions)
and not cur_data[-1] == ")"
):
print(f"Incorrect file found: {cur_data}")
self.incorrect_file_found = True
indices = list()
break
indices.append(cur_index)
self.test_success = not self.incorrect_file_found and self.test_success
def print_message_prefix(message):
print(f"{self.prefix}: {message}")
async def asset_picker(prefix, allowed_asset_extensions, asset, interaction_option):
active_modal_widget = await pyside_utils.wait_for_modal_widget()
if active_modal_widget and self.prefix == "":
self.prefix = prefix
dialog = active_modal_widget.findChildren(QtWidgets.QDialog, "AssetPickerDialogClass")[0]
print_message_prefix(f"Asset Picker title for Mesh: {dialog.windowTitle()}")
tree = dialog.findChildren(QtWidgets.QTreeView, "m_assetBrowserTreeViewWidget")[0]
scroll_area = tree.findChild(QtWidgets.QWidget, "qt_scrollarea_vcontainer")
scroll_bar = scroll_area.findChild(QtWidgets.QScrollBar)
tree.collapseAll()
await pyside_utils.wait_for_condition(lambda: not scroll_bar.isVisible(), 0.5)
print_message_prefix(
f"Scroll Bar is not visible before expanding the tree: {not scroll_bar.isVisible()}"
)
model_index_1 = pyside_utils.find_child_by_pattern(tree, self.file_path[0])
print(model_index_1.model())
model_index_2 = pyside_utils.find_child_by_pattern(model_index_1, self.file_path[1])
print_message_prefix(f"Top level folder initially collapsed: {not tree.isExpanded(model_index_1)}")
tree.expand(model_index_1)
print_message_prefix(f"Top level folder expanded: {tree.isExpanded(model_index_1)}")
print_message_prefix(f"Nested folder initially collapsed: {not tree.isExpanded(model_index_2)}")
tree.expand(model_index_2)
print_message_prefix(f"Nested folder expanded: {tree.isExpanded(model_index_2)}")
tree.expandAll()
await pyside_utils.wait_for_condition(lambda: scroll_bar.isVisible(), 0.5)
print_message_prefix(f"Scroll Bar appeared after expanding tree: {scroll_bar.isVisible()}")
tree.collapse(model_index_2)
print_message_prefix(f"Nested folder collapsed: {not tree.isExpanded(model_index_2)}")
tree.collapse(model_index_1)
print_message_prefix(f"Top level folder collapsed: {not tree.isExpanded(model_index_1)}")
verify_files_appeared(tree.model(), allowed_asset_extensions)
print_message_prefix(f"Expected Assets populated in the file picker: {not self.incorrect_file_found}")
move_and_resize_widget(active_modal_widget)
tree.collapseAll()
await pyside_utils.wait_for_condition(
lambda: len(dialog.findChildren(QtWidgets.QFrame, "m_searchWidget")) > 0, 0.5)
search_widget = dialog.findChildren(QtWidgets.QFrame, "m_searchWidget")[0]
search_line_edit = search_widget.findChildren(QtWidgets.QLineEdit, "textSearch")[0]
search_line_edit.setText(asset)
tree.expandAll()
asset_model_index = pyside_utils.find_child_by_pattern(tree, asset)
await pyside_utils.wait_for_condition(lambda: asset_model_index.isValid(), 2.0)
tree.expand(asset_model_index)
tree.setCurrentIndex(asset_model_index)
if interaction_option == "ok":
button_box = dialog.findChild(QtWidgets.QDialogButtonBox, "m_buttonBox")
ok_button = button_box.button(QtWidgets.QDialogButtonBox.Ok)
await pyside_utils.click_button_async(ok_button)
elif interaction_option == "enter":
QtTest.QTest.keyClick(tree, Qt.Key_Enter, Qt.NoModifier)
self.prefix = ""
self.test_success = self.create_level(
self.args["level"],
heightmap_resolution=1024,
heightmap_meters_per_pixel=1,
terrain_texture_resolution=4096,
use_terrain=False,
)
entity_position = math.Vector3(125.0, 136.0, 32.0)
entity = hydra.Entity("TestEntity")
entity.create_entity(entity_position, ["Mesh"])
editor_window = pyside_utils.get_editor_main_window()
entity_inspector = editor_window.findChild(QtWidgets.QDockWidget, "Entity Inspector")
component_list_widget = entity_inspector.findChild(QtWidgets.QWidget, "m_componentListContents")
general.select_object("TestEntity")
general.idle_wait(0.5)
mesh_asset = component_list_widget.findChildren(QtWidgets.QFrame, "Mesh Asset")[0]
attached_button = mesh_asset.findChildren(QtWidgets.QPushButton, "attached-button")[0]
pyside_utils.click_button_async(attached_button)
await asset_picker("Mesh Asset", ["azmodel", "fbx"], "cedar (ModelAsset)", "ok")
try:
mesh_success = await pyside_utils.wait_for_condition(lambda: is_asset_assigned(entity.components[0],
"ok"))
except pyside_utils.EventLoopTimeoutException as err:
print(err)
mesh_success = False
self.test_success = mesh_success and self.test_success
hydra.get_set_test(entity, 0, "Controller|Configuration|Mesh Asset", None)
general.select_object("TestEntity")
general.idle_wait(0.5)
mesh_asset = component_list_widget.findChildren(QtWidgets.QFrame, "Mesh Asset")[0]
attached_button = mesh_asset.findChildren(QtWidgets.QPushButton, "attached-button")[0]
pyside_utils.click_button_async(attached_button)
await asset_picker("Mesh Asset", ["azmodel", "fbx"], "cedar (ModelAsset)", "enter")
try:
mesh_success = await pyside_utils.wait_for_condition(lambda: is_asset_assigned(entity.components[0],
"enter"))
except pyside_utils.EventLoopTimeoutException as err:
print(err)
mesh_success = False
self.test_success = mesh_success and self.test_success
test = AssetPickerUIUXTest()
test.run()
| true | true |
f728727e32c5dfe9829d0c741c9d10726176a45c | 2,426 | py | Python | scrape_scripts/textScrape.py | bmcmenamin/word2vec_advice | 69dbde89b26b80d10f778147f2e3abe1628d6e05 | [
"MIT"
] | null | null | null | scrape_scripts/textScrape.py | bmcmenamin/word2vec_advice | 69dbde89b26b80d10f778147f2e3abe1628d6e05 | [
"MIT"
] | null | null | null | scrape_scripts/textScrape.py | bmcmenamin/word2vec_advice | 69dbde89b26b80d10f778147f2e3abe1628d6e05 | [
"MIT"
] | null | null | null | #!/Users/mcmenamin/.virtualenvs/py3env/bin/python
from lxml import html
import requests
from datetime import date
import numpy as np
import pandas as pd
import re as re
from itertools import chain
import pickle
from tqdm import tqdm
def getURLforYear(year, archiveURL='http://www.uexpress.com/dearabby/archives'):
archive = requests.get('{0}/{1}'.format(archiveURL, year))
tree = html.fromstring(archive.text)
urlList = [a.attrib['href'] for a in tree.find_class('media-link-main')]
return urlList
def scrape_page(extURL, baseURL='http://www.uexpress.com/'):
page = requests.get('{0}{1}'.format(baseURL, extURL))
tree = html.fromstring(page.text)
questions = tree.find_class('item-section')
allQ = []
for q in questions:
qText = [i.text_content() for i in q.iterfind('p')]
allQ += qText
allQ = ' '.join(allQ)
return allQ
def parseAbby(block):
block = block.strip().split('DEAR ')
abbyBlock = [p.startswith('ABBY:') for p in block]
dearReaderBlock = [p.startswith('READERS:') for p in block]
replyBlock = [not (p[0] or p[1]) for p in zip(abbyBlock, dearReaderBlock)]
QA_pairs = []
if True in abbyBlock and True in replyBlock:
firstBlock = abbyBlock.index(True)
block = block[firstBlock:]
abbyBlock = abbyBlock[firstBlock:]
dearReaderBlock = dearReaderBlock[firstBlock:]
replyBlock = replyBlock[firstBlock:]
for i in range(len(block)-1):
if abbyBlock[i] and replyBlock[i+1]:
QA_pairs.append([block[i], block[i+1]])
return QA_pairs
#
# Get an iterator of URLs from archives for a specific date range
#
archivedURLs = list(chain.from_iterable([getURLforYear(y) for y in range(1991,2017+1)]))
#
# Pull in the text from each archived URL
#
all_text_dict = {}
for url in tqdm(archivedURLs):
raw_text = scrape_page(url)
all_text_dict[url] = {'path': url,
'date': date(*[int(i) for i in url.split('/')[2:5]]),
'raw_text': raw_text,
'parse_text': parseAbby(raw_text)
}
df_text = pd.DataFrame.from_dict(all_text_dict, orient='index')
df_text.to_pickle('abbyText.pickle')
df_text.to_json('abbyText.json',
lines=True,
orient='records',
force_ascii=True
)
| 28.880952 | 88 | 0.623248 |
from lxml import html
import requests
from datetime import date
import numpy as np
import pandas as pd
import re as re
from itertools import chain
import pickle
from tqdm import tqdm
def getURLforYear(year, archiveURL='http://www.uexpress.com/dearabby/archives'):
archive = requests.get('{0}/{1}'.format(archiveURL, year))
tree = html.fromstring(archive.text)
urlList = [a.attrib['href'] for a in tree.find_class('media-link-main')]
return urlList
def scrape_page(extURL, baseURL='http://www.uexpress.com/'):
page = requests.get('{0}{1}'.format(baseURL, extURL))
tree = html.fromstring(page.text)
questions = tree.find_class('item-section')
allQ = []
for q in questions:
qText = [i.text_content() for i in q.iterfind('p')]
allQ += qText
allQ = ' '.join(allQ)
return allQ
def parseAbby(block):
block = block.strip().split('DEAR ')
abbyBlock = [p.startswith('ABBY:') for p in block]
dearReaderBlock = [p.startswith('READERS:') for p in block]
replyBlock = [not (p[0] or p[1]) for p in zip(abbyBlock, dearReaderBlock)]
QA_pairs = []
if True in abbyBlock and True in replyBlock:
firstBlock = abbyBlock.index(True)
block = block[firstBlock:]
abbyBlock = abbyBlock[firstBlock:]
dearReaderBlock = dearReaderBlock[firstBlock:]
replyBlock = replyBlock[firstBlock:]
for i in range(len(block)-1):
if abbyBlock[i] and replyBlock[i+1]:
QA_pairs.append([block[i], block[i+1]])
return QA_pairs
archivedURLs = list(chain.from_iterable([getURLforYear(y) for y in range(1991,2017+1)]))
all_text_dict = {}
for url in tqdm(archivedURLs):
raw_text = scrape_page(url)
all_text_dict[url] = {'path': url,
'date': date(*[int(i) for i in url.split('/')[2:5]]),
'raw_text': raw_text,
'parse_text': parseAbby(raw_text)
}
df_text = pd.DataFrame.from_dict(all_text_dict, orient='index')
df_text.to_pickle('abbyText.pickle')
df_text.to_json('abbyText.json',
lines=True,
orient='records',
force_ascii=True
)
| true | true |
f72872f61f7ef7659da0b278733dd6073c996977 | 22,691 | py | Python | tests/integration/test_process.py | martinskans/mapillary_tools | ac15996ce4d2a499e6d9ba10af5c23126e535222 | [
"BSD-2-Clause"
] | null | null | null | tests/integration/test_process.py | martinskans/mapillary_tools | ac15996ce4d2a499e6d9ba10af5c23126e535222 | [
"BSD-2-Clause"
] | null | null | null | tests/integration/test_process.py | martinskans/mapillary_tools | ac15996ce4d2a499e6d9ba10af5c23126e535222 | [
"BSD-2-Clause"
] | null | null | null | import json
import os
import subprocess
import zipfile
import hashlib
import pytest
import py.path
import exifread
EXECUTABLE = os.getenv("MAPILLARY_TOOLS_EXECUTABLE", "python3 -m mapillary_tools")
IMPORT_PATH = "tests/integration/mapillary_tools_process_images_provider/data"
USERNAME = "test_username_MAKE_SURE_IT_IS_UNIQUE_AND_LONG_AND_BORING"
CONFIG_CONTENT = f"""
[{USERNAME}]
MAPSettingsUsername = {USERNAME}
MAPSettingsUserKey = test_user_key
user_upload_token = test_user_token
"""
@pytest.fixture
def setup_config(tmpdir: py.path.local):
config_path = tmpdir.mkdir("configs").join("CLIENT_ID")
with open(config_path, "w") as fp:
fp.write(CONFIG_CONTENT)
yield config_path
if tmpdir.check():
tmpdir.remove(ignore_errors=True)
@pytest.fixture
def setup_data(tmpdir: py.path.local):
data_path = tmpdir.mkdir("data")
source = py.path.local(IMPORT_PATH)
source.copy(data_path)
yield data_path
if tmpdir.check():
tmpdir.remove(ignore_errors=True)
def test_basic():
for option in ["--version", "--help"]:
x = subprocess.run(f"{EXECUTABLE} {option}", shell=True)
assert x.returncode == 0, x.stderr
def test_process(setup_data: py.path.local):
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = os.path.join(setup_data, "mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
for desc in descs:
assert "filename" in desc
assert os.path.isfile(os.path.join(setup_data, desc["filename"]))
def validate_and_extract_zip(filename: str):
basename = os.path.basename(filename)
assert basename.startswith("mly_tools_"), filename
assert basename.endswith(".zip"), filename
ret = {}
import tempfile
with zipfile.ZipFile(filename) as zipf:
with tempfile.TemporaryDirectory() as tempdir:
zipf.extractall(path=tempdir)
for name in os.listdir(tempdir):
with open(os.path.join(tempdir, name), "rb") as fp:
tags = exifread.process_file(fp)
desc_tag = tags.get("Image ImageDescription")
assert desc_tag is not None, tags
desc = json.loads(str(desc_tag.values))
assert isinstance(desc.get("MAPLatitude"), (float, int)), desc
assert isinstance(desc.get("MAPLongitude"), (float, int)), desc
assert isinstance(desc.get("MAPCaptureTime"), str), desc
assert isinstance(desc.get("MAPCompassHeading"), dict), desc
for key in desc.keys():
assert key.startswith("MAP"), key
ret[name] = desc
return ret
def test_zip(tmpdir: py.path.local, setup_data: py.path.local):
zip_dir = tmpdir.mkdir("zip_dir")
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
x = subprocess.run(
f"{EXECUTABLE} zip {setup_data} {zip_dir}",
shell=True,
)
assert x.returncode == 0, x.stderr
assert 0 < len(zip_dir.listdir())
for file in zip_dir.listdir():
validate_and_extract_zip(str(file))
def test_upload_image_dir(
tmpdir: py.path.local, setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
x = subprocess.run(
f"{EXECUTABLE} upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
assert x.returncode == 0, x.stderr
def test_upload_image_dir_twice(
tmpdir: py.path.local, setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
md5sum_map = {}
# first upload
x = subprocess.run(
f"{EXECUTABLE} upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
md5sum_map[os.path.basename(file)] = file_md5sum(file)
# expect the second upload to not produce new uploads
x = subprocess.run(
f"{EXECUTABLE} upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
new_md5sum = file_md5sum(file)
assert md5sum_map[os.path.basename(file)] == new_md5sum
assert len(md5sum_map) == len(upload_dir.listdir())
def test_upload_zip(
tmpdir: py.path.local, setup_data: py.path.local, setup_config: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
zip_dir = tmpdir.mkdir("zip_dir")
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
x = subprocess.run(
f"{EXECUTABLE} zip {setup_data} {zip_dir}",
shell=True,
)
assert x.returncode == 0, x.stderr
for zfile in zip_dir.listdir():
x = subprocess.run(
f"{EXECUTABLE} upload {zfile} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
def test_process_and_upload(
tmpdir: py.path.local, setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
x = subprocess.run(
f"{EXECUTABLE} process_and_upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
def test_time(setup_data: py.path.local):
# before offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": "2018_06_08_13_24_10_000",
"DSC00497.JPG": "2018_06_08_13_32_28_000",
"V0370574.JPG": "2018_07_27_11_32_14_000",
}
for desc in descs:
assert "filename" in desc
assert expected[desc["filename"]] == desc["MAPCaptureTime"]
# after offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --offset_time=2.5",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": "2018_06_08_13_24_12_500",
"DSC00497.JPG": "2018_06_08_13_32_30_500",
"V0370574.JPG": "2018_07_27_11_32_16_500",
}
for desc in descs:
assert "filename" in desc
assert expected[desc["filename"]] == desc["MAPCaptureTime"]
# after offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --offset_time=-1.0",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": "2018_06_08_13_24_09_000",
"DSC00497.JPG": "2018_06_08_13_32_27_000",
"V0370574.JPG": "2018_07_27_11_32_13_000",
}
for desc in descs:
assert "filename" in desc
assert expected[desc["filename"]] == desc["MAPCaptureTime"]
def test_angle(setup_data: py.path.local):
# before offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": 270.89,
"DSC00497.JPG": 271.27,
"V0370574.JPG": 359.0,
}
for desc in descs:
assert "filename" in desc
assert (
abs(expected[desc["filename"]] - desc["MAPCompassHeading"]["TrueHeading"])
< 0.00001
)
assert (
abs(
expected[desc["filename"]]
- desc["MAPCompassHeading"]["MagneticHeading"]
)
< 0.00001
)
# after offset
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --offset_angle=2.5",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": 270.89 + 2.5,
"DSC00497.JPG": 271.27 + 2.5,
"V0370574.JPG": 1.5,
}
for desc in descs:
assert "filename" in desc
assert (
abs(expected[desc["filename"]] - desc["MAPCompassHeading"]["TrueHeading"])
< 0.00001
)
assert (
abs(
expected[desc["filename"]]
- desc["MAPCompassHeading"]["MagneticHeading"]
)
< 0.00001
)
def test_process_boolean_options(
setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
boolean_options = [
"--add_file_name",
"--add_import_date",
"--exclude_import_path",
"--interpolate_directions",
"--overwrite_EXIF_direction_tag",
"--overwrite_EXIF_gps_tag",
"--overwrite_EXIF_orientation_tag",
"--overwrite_EXIF_time_tag",
"--overwrite_all_EXIF_tags",
"--skip_subfolders",
"--windows_path",
]
for option in boolean_options:
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} {option}",
shell=True,
)
assert x.returncode == 0, x.stderr
all_options = " ".join(boolean_options)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} {all_options}",
shell=True,
)
assert x.returncode == 0, x.stderr
GPX_CONTENT = """
<gpx>
<trk>
<name>Mapillary GPX</name>
<trkseg>
<trkpt lat="0.02" lon="0.01">
<ele>1</ele>
<time>2018-06-08T13:23:34.805</time>
</trkpt>
<trkpt lat="2.02" lon="0.01">
<ele>2</ele>
<time>2018-06-08T13:24:35.809</time>
</trkpt>
<trkpt lat="2.02" lon="2.01">
<ele>4</ele>
<time>2018-06-08T13:33:36.813</time>
</trkpt>
<trkpt lat="4.02" lon="2.01">
<ele>9</ele>
<time>2018-06-08T13:58:37.812</time>
</trkpt>
</trkseg>
</trk>
</gpx>
"""
def find_desc_errors(descs):
return [desc for desc in descs if "error" in desc]
def filter_out_errors(descs):
return [desc for desc in descs if "error" not in desc]
def test_geotagging_from_gpx(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("mapillary_image_description.json")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --geotag_source_path {gpx_file} --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
# capture_time, lon, lat, elevation
"DSC00001.JPG": [
"2018_06_08_13_24_10_000",
0.01,
1.1738587633597797,
1.5769293816798897,
],
"DSC00497.JPG": [
"2018_06_08_13_32_28_000",
1.7556100139740183,
2.02,
3.7456100139740185,
],
}
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def test_geotagging_from_gpx_with_offset(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("mapillary_image_description.json")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --geotag_source_path {gpx_file} --interpolation_offset_time=-20 --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
# capture_time, lon, lat, elevation
"DSC00001.JPG": [
"2018_06_08_13_23_50_000",
0.01,
0.5181640548160776,
1.2490820274080388,
],
"DSC00497.JPG": [
"2018_06_08_13_32_08_000",
1.6816734072206487,
2.02,
3.671673407220649,
],
}
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def test_geotagging_from_gpx_use_gpx_start_time(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --interpolation_use_gpx_start_time --geotag_source_path {gpx_file} --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
# capture_time, lon, lat, elevation
"DSC00001.JPG": ["2018_06_08_13_23_34_805", 0.01, 0.02, 1.0],
"DSC00497.JPG": [
"2018_06_08_13_31_52_805",
1.6255000702397762,
2.02,
3.6155000702397766,
],
}
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def test_geotagging_from_gpx_use_gpx_start_time_with_offset(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --interpolation_use_gpx_start_time --geotag_source_path {gpx_file} --interpolation_offset_time=100 --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
# capture_time, lon, lat, elevation
"DSC00001.JPG": [
"2018_06_08_13_25_14_805",
0.15416159584772016,
2.02,
2.14416159584772,
],
"DSC00497.JPG": [
"2018_06_08_13_33_32_805",
1.9951831040066244,
2.02,
3.985183104006625,
],
}
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def ffmpeg_installed():
ffmpeg_path = os.getenv("MAPILLARY_FFMPEG_PATH", "ffmpeg")
try:
subprocess.run([ffmpeg_path, "-version"])
except FileNotFoundError:
return False
return True
is_ffmpeg_installed = ffmpeg_installed()
def test_sample_video(setup_data: py.path.local):
if not is_ffmpeg_installed:
pytest.skip("skip because ffmpeg not installed")
for input_path in [setup_data, setup_data.join("sample-5s.mp4")]:
x = subprocess.run(
f"{EXECUTABLE} sample_video --rerun {input_path}",
shell=True,
)
assert x.returncode != 0, x.stderr
assert len(setup_data.join("mapillary_sampled_video_frames").listdir()) == 0
x = subprocess.run(
f"{EXECUTABLE} sample_video --skip_sample_errors --rerun {input_path}",
shell=True,
)
assert x.returncode == 0, x.stderr
assert len(setup_data.join("mapillary_sampled_video_frames").listdir()) == 0
x = subprocess.run(
f"{EXECUTABLE} sample_video --video_start_time 2021_10_10_10_10_10_123 --rerun {input_path}",
shell=True,
)
assert x.returncode == 0, x.stderr
sample_path = setup_data.join("mapillary_sampled_video_frames")
assert len(sample_path.listdir()) == 1
samples = sample_path.join("sample-5s.mp4").listdir()
samples.sort()
times = []
for s in samples:
with s.open("rb") as fp:
tags = exifread.process_file(fp)
times.append(tags["EXIF DateTimeOriginal"].values)
assert (
"2021:10:10 10:10:10.123",
"2021:10:10 10:10:12.123",
"2021:10:10 10:10:14.123",
) == tuple(times)
def test_video_process(setup_data: py.path.local):
if not is_ffmpeg_installed:
pytest.skip("skip because ffmpeg not installed")
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("my_samples").join("mapillary_image_description.json")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} video_process --video_start_time 2018_06_08_13_23_34_123 --geotag_source gpx --geotag_source_path {gpx_file} {setup_data} {setup_data.join('my_samples')}",
shell=True,
)
assert x.returncode != 0, x.stderr
with open(desc_path) as fp:
descs = json.load(fp)
assert 1 == len(find_desc_errors(descs))
assert 2 == len(filter_out_errors(descs))
def test_video_process_multiple_videos(setup_data: py.path.local):
if not is_ffmpeg_installed:
pytest.skip("skip because ffmpeg not installed")
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("my_samples").join("mapillary_image_description.json")
sub_folder = setup_data.join("video_sub_folder").mkdir()
video_path = setup_data.join("sample-5s.mp4")
video_path.copy(sub_folder)
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} video_process --video_start_time 2018_06_08_13_23_34_123 --geotag_source gpx --geotag_source_path {gpx_file} {video_path} {setup_data.join('my_samples')}",
shell=True,
)
assert x.returncode != 0, x.stderr
with open(desc_path) as fp:
descs = json.load(fp)
for d in descs:
assert d["filename"].startswith("sample-5s.mp4/")
assert 1 == len(find_desc_errors(descs))
assert 2 == len(filter_out_errors(descs))
def file_md5sum(path) -> str:
with open(path, "rb") as fp:
md5 = hashlib.md5()
while True:
buf = fp.read(1024 * 1024 * 32)
if not buf:
break
md5.update(buf)
return md5.hexdigest()
def test_upload_mp4(
tmpdir: py.path.local, setup_data: py.path.local, setup_config: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
video_path = setup_data.join("sample-5s.mp4")
md5sum = file_md5sum(video_path)
x = subprocess.run(
f"{EXECUTABLE} upload {video_path} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
assert 1 == len(upload_dir.listdir())
assert {"mly_tools_8cd0e9af15f4baaafe9dfe98ace8b886.mp4"} == {
os.path.basename(f) for f in upload_dir.listdir()
}
assert {md5sum} == {file_md5sum(f) for f in upload_dir.listdir()}
| 33.369118 | 186 | 0.623287 | import json
import os
import subprocess
import zipfile
import hashlib
import pytest
import py.path
import exifread
# Command line used to invoke mapillary_tools; override via the
# MAPILLARY_TOOLS_EXECUTABLE env var to test an installed binary instead
# of the in-repo module.
EXECUTABLE = os.getenv("MAPILLARY_TOOLS_EXECUTABLE", "python3 -m mapillary_tools")
# Directory of fixture images/videos that each test copies into its own tmpdir.
IMPORT_PATH = "tests/integration/mapillary_tools_process_images_provider/data"
# Dummy account name used only for dry-run uploads (never hits the real API).
USERNAME = "test_username_MAKE_SURE_IT_IS_UNIQUE_AND_LONG_AND_BORING"
# Minimal Mapillary config-file contents registering the dummy user above.
CONFIG_CONTENT = f"""
[{USERNAME}]
MAPSettingsUsername = {USERNAME}
MAPSettingsUserKey = test_user_key
user_upload_token = test_user_token
"""
@pytest.fixture
def setup_config(tmpdir: py.path.local):
    """Write a throwaway Mapillary config for the dummy user and yield its path.

    The whole tmpdir (and the config inside it) is removed on teardown.
    """
    cfg_path = tmpdir.mkdir("configs").join("CLIENT_ID")
    with open(cfg_path, "w") as handle:
        handle.write(CONFIG_CONTENT)
    yield cfg_path
    if tmpdir.check():
        tmpdir.remove(ignore_errors=True)
@pytest.fixture
def setup_data(tmpdir: py.path.local):
data_path = tmpdir.mkdir("data")
source = py.path.local(IMPORT_PATH)
source.copy(data_path)
yield data_path
if tmpdir.check():
tmpdir.remove(ignore_errors=True)
def test_basic():
for option in ["--version", "--help"]:
x = subprocess.run(f"{EXECUTABLE} {option}", shell=True)
assert x.returncode == 0, x.stderr
def test_process(setup_data: py.path.local):
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = os.path.join(setup_data, "mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
for desc in descs:
assert "filename" in desc
assert os.path.isfile(os.path.join(setup_data, desc["filename"]))
def validate_and_extract_zip(filename: str):
    """Validate a mapillary-tools upload zip and return its image descriptions.

    The archive name must follow the ``mly_tools_*.zip`` pattern. Every image
    inside must carry a JSON ``ImageDescription`` EXIF tag with the expected
    MAP* fields; returns a mapping of image name to its parsed description.
    """
    import tempfile

    zip_name = os.path.basename(filename)
    assert zip_name.startswith("mly_tools_"), filename
    assert zip_name.endswith(".zip"), filename
    descriptions = {}
    with zipfile.ZipFile(filename) as archive:
        with tempfile.TemporaryDirectory() as workdir:
            archive.extractall(path=workdir)
            for image_name in os.listdir(workdir):
                with open(os.path.join(workdir, image_name), "rb") as image_fp:
                    exif_tags = exifread.process_file(image_fp)
                    description_tag = exif_tags.get("Image ImageDescription")
                    assert description_tag is not None, exif_tags
                    description = json.loads(str(description_tag.values))
                    # Required fields and their expected types.
                    assert isinstance(description.get("MAPLatitude"), (float, int)), description
                    assert isinstance(description.get("MAPLongitude"), (float, int)), description
                    assert isinstance(description.get("MAPCaptureTime"), str), description
                    assert isinstance(description.get("MAPCompassHeading"), dict), description
                    # No stray, non-Mapillary keys may leak into the description.
                    for key in description.keys():
                        assert key.startswith("MAP"), key
                    descriptions[image_name] = description
    return descriptions
def test_zip(tmpdir: py.path.local, setup_data: py.path.local):
zip_dir = tmpdir.mkdir("zip_dir")
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
x = subprocess.run(
f"{EXECUTABLE} zip {setup_data} {zip_dir}",
shell=True,
)
assert x.returncode == 0, x.stderr
assert 0 < len(zip_dir.listdir())
for file in zip_dir.listdir():
validate_and_extract_zip(str(file))
def test_upload_image_dir(
tmpdir: py.path.local, setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
x = subprocess.run(
f"{EXECUTABLE} upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
assert x.returncode == 0, x.stderr
def test_upload_image_dir_twice(
tmpdir: py.path.local, setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
md5sum_map = {}
x = subprocess.run(
f"{EXECUTABLE} upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
md5sum_map[os.path.basename(file)] = file_md5sum(file)
x = subprocess.run(
f"{EXECUTABLE} upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
new_md5sum = file_md5sum(file)
assert md5sum_map[os.path.basename(file)] == new_md5sum
assert len(md5sum_map) == len(upload_dir.listdir())
def test_upload_zip(
tmpdir: py.path.local, setup_data: py.path.local, setup_config: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
zip_dir = tmpdir.mkdir("zip_dir")
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
x = subprocess.run(
f"{EXECUTABLE} zip {setup_data} {zip_dir}",
shell=True,
)
assert x.returncode == 0, x.stderr
for zfile in zip_dir.listdir():
x = subprocess.run(
f"{EXECUTABLE} upload {zfile} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
def test_process_and_upload(
tmpdir: py.path.local, setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
x = subprocess.run(
f"{EXECUTABLE} process_and_upload {setup_data} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
for file in upload_dir.listdir():
validate_and_extract_zip(str(file))
def test_time(setup_data: py.path.local):
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": "2018_06_08_13_24_10_000",
"DSC00497.JPG": "2018_06_08_13_32_28_000",
"V0370574.JPG": "2018_07_27_11_32_14_000",
}
for desc in descs:
assert "filename" in desc
assert expected[desc["filename"]] == desc["MAPCaptureTime"]
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --offset_time=2.5",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": "2018_06_08_13_24_12_500",
"DSC00497.JPG": "2018_06_08_13_32_30_500",
"V0370574.JPG": "2018_07_27_11_32_16_500",
}
for desc in descs:
assert "filename" in desc
assert expected[desc["filename"]] == desc["MAPCaptureTime"]
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --offset_time=-1.0",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": "2018_06_08_13_24_09_000",
"DSC00497.JPG": "2018_06_08_13_32_27_000",
"V0370574.JPG": "2018_07_27_11_32_13_000",
}
for desc in descs:
assert "filename" in desc
assert expected[desc["filename"]] == desc["MAPCaptureTime"]
def test_angle(setup_data: py.path.local):
x = subprocess.run(
f"{EXECUTABLE} process {setup_data}",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": 270.89,
"DSC00497.JPG": 271.27,
"V0370574.JPG": 359.0,
}
for desc in descs:
assert "filename" in desc
assert (
abs(expected[desc["filename"]] - desc["MAPCompassHeading"]["TrueHeading"])
< 0.00001
)
assert (
abs(
expected[desc["filename"]]
- desc["MAPCompassHeading"]["MagneticHeading"]
)
< 0.00001
)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --offset_angle=2.5",
shell=True,
)
assert x.returncode == 0, x.stderr
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
expected = {
"DSC00001.JPG": 270.89 + 2.5,
"DSC00497.JPG": 271.27 + 2.5,
"V0370574.JPG": 1.5,
}
for desc in descs:
assert "filename" in desc
assert (
abs(expected[desc["filename"]] - desc["MAPCompassHeading"]["TrueHeading"])
< 0.00001
)
assert (
abs(
expected[desc["filename"]]
- desc["MAPCompassHeading"]["MagneticHeading"]
)
< 0.00001
)
def test_process_boolean_options(
setup_config: py.path.local, setup_data: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
boolean_options = [
"--add_file_name",
"--add_import_date",
"--exclude_import_path",
"--interpolate_directions",
"--overwrite_EXIF_direction_tag",
"--overwrite_EXIF_gps_tag",
"--overwrite_EXIF_orientation_tag",
"--overwrite_EXIF_time_tag",
"--overwrite_all_EXIF_tags",
"--skip_subfolders",
"--windows_path",
]
for option in boolean_options:
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} {option}",
shell=True,
)
assert x.returncode == 0, x.stderr
all_options = " ".join(boolean_options)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} {all_options}",
shell=True,
)
assert x.returncode == 0, x.stderr
GPX_CONTENT = """
<gpx>
<trk>
<name>Mapillary GPX</name>
<trkseg>
<trkpt lat="0.02" lon="0.01">
<ele>1</ele>
<time>2018-06-08T13:23:34.805</time>
</trkpt>
<trkpt lat="2.02" lon="0.01">
<ele>2</ele>
<time>2018-06-08T13:24:35.809</time>
</trkpt>
<trkpt lat="2.02" lon="2.01">
<ele>4</ele>
<time>2018-06-08T13:33:36.813</time>
</trkpt>
<trkpt lat="4.02" lon="2.01">
<ele>9</ele>
<time>2018-06-08T13:58:37.812</time>
</trkpt>
</trkseg>
</trk>
</gpx>
"""
def find_desc_errors(descs):
    """Return the image descriptions that carry an "error" entry."""
    failed = []
    for description in descs:
        if "error" in description:
            failed.append(description)
    return failed
def filter_out_errors(descs):
    """Return the image descriptions that do NOT carry an "error" entry."""
    healthy = []
    for description in descs:
        if "error" not in description:
            healthy.append(description)
    return healthy
def test_geotagging_from_gpx(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("mapillary_image_description.json")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --geotag_source_path {gpx_file} --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
"DSC00001.JPG": [
"2018_06_08_13_24_10_000",
0.01,
1.1738587633597797,
1.5769293816798897,
],
"DSC00497.JPG": [
"2018_06_08_13_32_28_000",
1.7556100139740183,
2.02,
3.7456100139740185,
],
}
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def test_geotagging_from_gpx_with_offset(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("mapillary_image_description.json")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --geotag_source_path {gpx_file} --interpolation_offset_time=-20 --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
"DSC00001.JPG": [
"2018_06_08_13_23_50_000",
0.01,
0.5181640548160776,
1.2490820274080388,
],
"DSC00497.JPG": [
"2018_06_08_13_32_08_000",
1.6816734072206487,
2.02,
3.671673407220649,
],
}
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def test_geotagging_from_gpx_use_gpx_start_time(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --interpolation_use_gpx_start_time --geotag_source_path {gpx_file} --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
"DSC00001.JPG": ["2018_06_08_13_23_34_805", 0.01, 0.02, 1.0],
"DSC00497.JPG": [
"2018_06_08_13_31_52_805",
1.6255000702397762,
2.02,
3.6155000702397766,
],
}
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def test_geotagging_from_gpx_use_gpx_start_time_with_offset(setup_data: py.path.local):
gpx_file = setup_data.join("test.gpx")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} process {setup_data} --geotag_source gpx --interpolation_use_gpx_start_time --geotag_source_path {gpx_file} --interpolation_offset_time=100 --skip_process_errors",
shell=True,
)
assert x.returncode == 0, x.stderr
expected_lonlat = {
"DSC00001.JPG": [
"2018_06_08_13_25_14_805",
0.15416159584772016,
2.02,
2.14416159584772,
],
"DSC00497.JPG": [
"2018_06_08_13_33_32_805",
1.9951831040066244,
2.02,
3.985183104006625,
],
}
desc_path = setup_data.join("mapillary_image_description.json")
with open(desc_path) as fp:
descs = json.load(fp)
assert {"V0370574.JPG"} == {d["filename"] for d in find_desc_errors(descs)}
for desc in find_desc_errors(descs):
assert desc.get("error").get("type") == "MapillaryOutsideGPXTrackError"
for desc in filter_out_errors(descs):
assert expected_lonlat[desc["filename"]][0] == desc["MAPCaptureTime"]
assert (
abs(expected_lonlat[desc["filename"]][1] - desc["MAPLongitude"]) < 0.00001
)
assert abs(expected_lonlat[desc["filename"]][2] - desc["MAPLatitude"]) < 0.00001
assert abs(expected_lonlat[desc["filename"]][3] - desc["MAPAltitude"]) < 0.00001
def ffmpeg_installed() -> bool:
    """Return True if the ffmpeg binary can be executed.

    The binary is looked up via the MAPILLARY_FFMPEG_PATH environment
    variable, falling back to "ffmpeg" on the PATH.
    """
    ffmpeg_path = os.getenv("MAPILLARY_FFMPEG_PATH", "ffmpeg")
    try:
        # Discard the version banner so it does not pollute the test output.
        subprocess.run(
            [ffmpeg_path, "-version"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    except FileNotFoundError:
        return False
    return True
is_ffmpeg_installed = ffmpeg_installed()
def test_sample_video(setup_data: py.path.local):
if not is_ffmpeg_installed:
pytest.skip("skip because ffmpeg not installed")
for input_path in [setup_data, setup_data.join("sample-5s.mp4")]:
x = subprocess.run(
f"{EXECUTABLE} sample_video --rerun {input_path}",
shell=True,
)
assert x.returncode != 0, x.stderr
assert len(setup_data.join("mapillary_sampled_video_frames").listdir()) == 0
x = subprocess.run(
f"{EXECUTABLE} sample_video --skip_sample_errors --rerun {input_path}",
shell=True,
)
assert x.returncode == 0, x.stderr
assert len(setup_data.join("mapillary_sampled_video_frames").listdir()) == 0
x = subprocess.run(
f"{EXECUTABLE} sample_video --video_start_time 2021_10_10_10_10_10_123 --rerun {input_path}",
shell=True,
)
assert x.returncode == 0, x.stderr
sample_path = setup_data.join("mapillary_sampled_video_frames")
assert len(sample_path.listdir()) == 1
samples = sample_path.join("sample-5s.mp4").listdir()
samples.sort()
times = []
for s in samples:
with s.open("rb") as fp:
tags = exifread.process_file(fp)
times.append(tags["EXIF DateTimeOriginal"].values)
assert (
"2021:10:10 10:10:10.123",
"2021:10:10 10:10:12.123",
"2021:10:10 10:10:14.123",
) == tuple(times)
def test_video_process(setup_data: py.path.local):
if not is_ffmpeg_installed:
pytest.skip("skip because ffmpeg not installed")
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("my_samples").join("mapillary_image_description.json")
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} video_process --video_start_time 2018_06_08_13_23_34_123 --geotag_source gpx --geotag_source_path {gpx_file} {setup_data} {setup_data.join('my_samples')}",
shell=True,
)
assert x.returncode != 0, x.stderr
with open(desc_path) as fp:
descs = json.load(fp)
assert 1 == len(find_desc_errors(descs))
assert 2 == len(filter_out_errors(descs))
def test_video_process_multiple_videos(setup_data: py.path.local):
if not is_ffmpeg_installed:
pytest.skip("skip because ffmpeg not installed")
gpx_file = setup_data.join("test.gpx")
desc_path = setup_data.join("my_samples").join("mapillary_image_description.json")
sub_folder = setup_data.join("video_sub_folder").mkdir()
video_path = setup_data.join("sample-5s.mp4")
video_path.copy(sub_folder)
with gpx_file.open("w") as fp:
fp.write(GPX_CONTENT)
x = subprocess.run(
f"{EXECUTABLE} video_process --video_start_time 2018_06_08_13_23_34_123 --geotag_source gpx --geotag_source_path {gpx_file} {video_path} {setup_data.join('my_samples')}",
shell=True,
)
assert x.returncode != 0, x.stderr
with open(desc_path) as fp:
descs = json.load(fp)
for d in descs:
assert d["filename"].startswith("sample-5s.mp4/")
assert 1 == len(find_desc_errors(descs))
assert 2 == len(filter_out_errors(descs))
def file_md5sum(path) -> str:
with open(path, "rb") as fp:
md5 = hashlib.md5()
while True:
buf = fp.read(1024 * 1024 * 32)
if not buf:
break
md5.update(buf)
return md5.hexdigest()
def test_upload_mp4(
tmpdir: py.path.local, setup_data: py.path.local, setup_config: py.path.local
):
os.environ["MAPILLARY_CONFIG_PATH"] = str(setup_config)
upload_dir = tmpdir.mkdir("mapillary_public_uploads")
os.environ["MAPILLARY_UPLOAD_PATH"] = str(upload_dir)
video_path = setup_data.join("sample-5s.mp4")
md5sum = file_md5sum(video_path)
x = subprocess.run(
f"{EXECUTABLE} upload {video_path} --dry_run --user_name={USERNAME}",
shell=True,
)
assert x.returncode == 0, x.stderr
assert 1 == len(upload_dir.listdir())
assert {"mly_tools_8cd0e9af15f4baaafe9dfe98ace8b886.mp4"} == {
os.path.basename(f) for f in upload_dir.listdir()
}
assert {md5sum} == {file_md5sum(f) for f in upload_dir.listdir()}
| true | true |
f728735a0a2cd2a637b30db6ca8659076398b7a8 | 4,417 | py | Python | examples/sac.py | vincentlui/unsupervised-goal-conditioned-rl | 4f2e6938e072cb52f8ee779a939fe7bf6a980d45 | [
"MIT"
] | null | null | null | examples/sac.py | vincentlui/unsupervised-goal-conditioned-rl | 4f2e6938e072cb52f8ee779a939fe7bf6a980d45 | [
"MIT"
] | null | null | null | examples/sac.py | vincentlui/unsupervised-goal-conditioned-rl | 4f2e6938e072cb52f8ee779a939fe7bf6a980d45 | [
"MIT"
] | null | null | null | from gym.envs.mujoco import HalfCheetahEnv
import argparse
import gym
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic
from rlkit.torch.sac.sac import SACTrainer
from rlkit.torch.networks import FlattenMlp
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
from envs.navigation2d.navigation2d import Navigation2d
from rlkit.envs.mujoco.ant import AntEnv
from rlkit.envs.mujoco.half_cheetah import HalfCheetahEnv
def experiment(variant, args):
    """Build and train a SAC agent on the environment named by ``args.env``.

    ``variant`` supplies the hidden-layer width (``layer_size``), the replay
    buffer size, and keyword arguments for the trainer and the batch RL
    algorithm (see the ``__main__`` block for the schema).
    """
    expl_env, eval_env = get_env(str(args.env))
    # expl_env = NormalizedBoxEnv(HalfCheetahEnv())
    # eval_env = NormalizedBoxEnv(HalfCheetahEnv())
    obs_dim = expl_env.observation_space.low.size
    action_dim = eval_env.action_space.low.size
    M = variant['layer_size']
    # Twin Q-networks plus their target copies (SAC double-Q setup); each maps
    # the concatenated (observation, action) pair to a scalar value.
    qf1 = FlattenMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        hidden_sizes=[M, M],
    )
    qf2 = FlattenMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        hidden_sizes=[M, M],
    )
    target_qf1 = FlattenMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        hidden_sizes=[M, M],
    )
    target_qf2 = FlattenMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        hidden_sizes=[M, M],
    )
    policy = TanhGaussianPolicy(
        obs_dim=obs_dim,
        action_dim=action_dim,
        hidden_sizes=[M, M],
    )
    # Evaluation rolls out the deterministic version of the stochastic policy.
    eval_policy = MakeDeterministic(policy)
    eval_path_collector = MdpPathCollector(
        eval_env,
        eval_policy,
    )
    expl_path_collector = MdpPathCollector(
        expl_env,
        policy,
    )
    replay_buffer = EnvReplayBuffer(
        variant['replay_buffer_size'],
        expl_env,
    )
    trainer = SACTrainer(
        env=eval_env,
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        target_qf1=target_qf1,
        target_qf2=target_qf2,
        **variant['trainer_kwargs']
    )
    algorithm = TorchBatchRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
        replay_buffer=replay_buffer,
        **variant['algorithm_kwargs']
    )
    # Move all networks to the device configured in pytorch_util, then train.
    algorithm.to(ptu.device)
    algorithm.train()
def get_env(name):
    """Build the (exploration, evaluation) environment pair for *name*.

    Known names map to custom environments; any other name is forwarded to
    ``gym.make``. Both environments are wrapped in ``NormalizedBoxEnv``.
    """
    if name == 'test':
        exploration, evaluation = Navigation2d(), Navigation2d()
        # exploration.set_random_start_state(True)
        # evaluation.set_random_start_state(True)
    elif name == 'Ant':
        # NOTE(review): exploration hides qpos while evaluation exposes it —
        # presumably intentional; confirm against the training setup.
        exploration = AntEnv(expose_all_qpos=False)
        evaluation = AntEnv(expose_all_qpos=True)
    elif name == 'Half-cheetah':
        exploration = HalfCheetahEnv(expose_all_qpos=False)
        evaluation = HalfCheetahEnv(expose_all_qpos=False)
    else:
        exploration, evaluation = gym.make(name), gym.make(name)
    return NormalizedBoxEnv(exploration), NormalizedBoxEnv(evaluation)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('env', type=str,
                        help='environment')
    args = parser.parse_args()
    # noinspection PyTypeChecker
    variant = dict(
        algorithm="SAC",
        version="normal",
        layer_size=128,
        replay_buffer_size=int(1E6),
        algorithm_kwargs=dict(
            num_epochs=3000,
            num_eval_steps_per_epoch=2000,
            num_trains_per_train_loop=200,
            num_expl_steps_per_train_loop=1000,
            min_num_steps_before_training=1000,
            max_path_length=200,
            batch_size=128,
        ),
        trainer_kwargs=dict(
            discount=0.99,
            soft_target_tau=5e-3,
            target_update_period=1,
            policy_lr=3E-4,
            qf_lr=3E-4,
            reward_scale=1,
            use_automatic_entropy_tuning=True,
        ),
    )
    # Configure the experiment logger once, named after the algorithm and the
    # environment. A preceding setup_logger('name-of-experiment', ...) call —
    # leftover from the rlkit example template — was removed; it only created
    # a stray, unused log directory before the real logger was configured.
    setup_logger('SAC' + '_' + args.env, variant=variant, snapshot_mode="gap_and_last",
                 snapshot_gap=100, )
    # ptu.set_gpu_mode(True)  # optionally set the GPU (default=False)
    experiment(variant, args)
| 32.962687 | 127 | 0.672628 | from gym.envs.mujoco import HalfCheetahEnv
import argparse
import gym
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic
from rlkit.torch.sac.sac import SACTrainer
from rlkit.torch.networks import FlattenMlp
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
from envs.navigation2d.navigation2d import Navigation2d
from rlkit.envs.mujoco.ant import AntEnv
from rlkit.envs.mujoco.half_cheetah import HalfCheetahEnv
def experiment(variant, args):
expl_env, eval_env = get_env(str(args.env))
obs_dim = expl_env.observation_space.low.size
action_dim = eval_env.action_space.low.size
M = variant['layer_size']
qf1 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
qf2 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
target_qf1 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
target_qf2 = FlattenMlp(
input_size=obs_dim + action_dim,
output_size=1,
hidden_sizes=[M, M],
)
policy = TanhGaussianPolicy(
obs_dim=obs_dim,
action_dim=action_dim,
hidden_sizes=[M, M],
)
eval_policy = MakeDeterministic(policy)
eval_path_collector = MdpPathCollector(
eval_env,
eval_policy,
)
expl_path_collector = MdpPathCollector(
expl_env,
policy,
)
replay_buffer = EnvReplayBuffer(
variant['replay_buffer_size'],
expl_env,
)
trainer = SACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**variant['trainer_kwargs']
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algorithm_kwargs']
)
algorithm.to(ptu.device)
algorithm.train()
def get_env(name):
if name == 'test':
expl_env, eval_env = Navigation2d(), Navigation2d()
return NormalizedBoxEnv(expl_env), NormalizedBoxEnv(eval_env)
elif name == 'Ant':
return NormalizedBoxEnv(AntEnv(expose_all_qpos=False)), NormalizedBoxEnv(AntEnv(expose_all_qpos=True))
elif name == 'Half-cheetah':
return NormalizedBoxEnv(HalfCheetahEnv(expose_all_qpos=False)), NormalizedBoxEnv(HalfCheetahEnv(expose_all_qpos=False))
return NormalizedBoxEnv(gym.make(name)), NormalizedBoxEnv(gym.make(name))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('env', type=str,
help='environment')
args = parser.parse_args()
variant = dict(
algorithm="SAC",
version="normal",
layer_size=128,
replay_buffer_size=int(1E6),
algorithm_kwargs=dict(
num_epochs=3000,
num_eval_steps_per_epoch=2000,
num_trains_per_train_loop=200,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=200,
batch_size=128,
),
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
use_automatic_entropy_tuning=True,
),
)
setup_logger('name-of-experiment', variant=variant)
setup_logger('SAC' + '_' + args.env, variant=variant, snapshot_mode="gap_and_last",
snapshot_gap=100, )
| true | true |
f72874a50d223d4834382995b21fd851e0fdff25 | 75 | py | Python | simpleArraySum.py | ariz-ahmad/hackerrank-algorithms | e7fbce8581a1e502aa4b37313351f742f73b62ff | [
"MIT"
] | null | null | null | simpleArraySum.py | ariz-ahmad/hackerrank-algorithms | e7fbce8581a1e502aa4b37313351f742f73b62ff | [
"MIT"
] | null | null | null | simpleArraySum.py | ariz-ahmad/hackerrank-algorithms | e7fbce8581a1e502aa4b37313351f742f73b62ff | [
"MIT"
] | null | null | null | def simpleArraySum(ar):
# sum() function works on lists
return sum(ar) | 25 | 32 | 0.706667 | def simpleArraySum(ar):
return sum(ar) | true | true |
f72874af72e89ceba6c537a1f15babc566ebbff2 | 169 | py | Python | integrations/__init__.py | IgorHoholko/metrics | 5510ccd99eaec5ab8175bbd5e2ad9e66e82d10e4 | [
"Apache-2.0"
] | 1 | 2021-05-16T11:36:04.000Z | 2021-05-16T11:36:04.000Z | integrations/__init__.py | IgorHoholko/metrics | 5510ccd99eaec5ab8175bbd5e2ad9e66e82d10e4 | [
"Apache-2.0"
] | null | null | null | integrations/__init__.py | IgorHoholko/metrics | 5510ccd99eaec5ab8175bbd5e2ad9e66e82d10e4 | [
"Apache-2.0"
] | null | null | null | import operator
from torchmetrics.utilities.imports import _compare_version
_LIGHTNING_GREATER_EQUAL_1_3 = _compare_version("pytorch_lightning", operator.ge, "1.3.0")
| 28.166667 | 90 | 0.840237 | import operator
from torchmetrics.utilities.imports import _compare_version
_LIGHTNING_GREATER_EQUAL_1_3 = _compare_version("pytorch_lightning", operator.ge, "1.3.0")
| true | true |
f72874d120229a1b88e2d6714969c996d23a2339 | 28,165 | py | Python | src/pretix/presale/views/event.py | bhaettasch/pretix | 5e355b400573783bdd17b1352aefcb36b0efc3f6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/presale/views/event.py | bhaettasch/pretix | 5e355b400573783bdd17b1352aefcb36b0efc3f6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/presale/views/event.py | bhaettasch/pretix | 5e355b400573783bdd17b1352aefcb36b0efc3f6 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import calendar
import sys
from collections import defaultdict
from datetime import date, datetime, timedelta
from decimal import Decimal
from importlib import import_module
import isoweek
import pytz
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.db.models import (
Count, Exists, IntegerField, OuterRef, Prefetch, Value,
)
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.decorators import method_decorator
from django.utils.formats import get_format
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from pretix.base.channels import get_all_sales_channels
from pretix.base.models import ItemVariation, Quota, SeatCategoryMapping
from pretix.base.models.event import SubEvent
from pretix.base.models.items import (
ItemBundle, SubEventItem, SubEventItemVariation,
)
from pretix.base.services.quotas import QuotaAvailability
from pretix.helpers.compat import date_fromisocalendar
from pretix.multidomain.urlreverse import eventreverse
from pretix.presale.ical import get_ical
from pretix.presale.signals import item_description
from pretix.presale.views.organizer import (
EventListMixin, add_subevents_for_days, days_for_template,
filter_qs_by_attr, weeks_for_template,
)
from ...helpers.formats.en.formats import WEEK_FORMAT
from . import (
CartMixin, EventViewMixin, allow_frame_if_namespaced, get_cart,
iframe_entry_view_wrapper,
)
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore
def item_group_by_category(items):
    """Group *items* into (category, items-in-category) pairs, ordered.

    One tuple is produced per distinct ``item.category``. Groups are ordered
    by the category's ``(position, id)``; a missing category (``None``) or an
    unsaved one (``id is None``) sorts with the key ``(0, 0)``.
    """
    def category_sort_key(group):
        category = group[0]
        if category is None or category.id is None:
            return (0, 0)
        return (category.position, category.id)

    # Deduplicate categories via a set, then collect each category's items.
    groups = []
    for category in set(item.category for item in items):
        members = [item for item in items if item.category == category]
        groups.append((category, members))
    # The set is unordered, so impose the category ordering here.
    return sorted(groups, key=category_sort_key)
def get_grouped_items(event, subevent=None, voucher=None, channel='web', require_seat=0, base_qs=None, allow_addons=False,
                      quota_cache=None, filter_items=None, filter_categories=None):
    """Resolve the products to display on an event's shop page.

    Builds one big queryset over the event's items (with quotas, bundles and
    variations prefetched), then annotates each item/variation in Python with
    availability and display prices.

    Returns a tuple ``(items, display_add_to_cart)`` where *items* is the list
    of purchasable items and *display_add_to_cart* is True if at least one of
    them can currently be ordered.
    """
    # Remember whether the caller supplied a custom base queryset: results for
    # custom querysets must not be written to the shared quota cache below.
    base_qs_set = base_qs is not None
    base_qs = base_qs if base_qs is not None else event.items
    # Subquery annotation: does the item have a seat category mapping for this
    # (sub)event, i.e. is it sold through the seating plan?
    requires_seat = Exists(
        SeatCategoryMapping.objects.filter(
            product_id=OuterRef('pk'),
            subevent=subevent
        )
    )
    if not event.settings.seating_choice:
        # Seating disabled for this event: treat every item as non-seated.
        requires_seat = Value(0, output_field=IntegerField())
    items = base_qs.using(settings.DATABASE_REPLICA).filter_available(channel=channel, voucher=voucher, allow_addons=allow_addons).select_related(
        'category', 'tax_rule',  # for re-grouping
        'hidden_if_available',
    ).prefetch_related(
        # All quota querysets are filtered to the current subevent and stashed
        # on `_subevent_quotas`, for the item itself and for everything a
        # bundle can pull in (bundled items and bundled variations).
        Prefetch('quotas',
                 to_attr='_subevent_quotas',
                 queryset=event.quotas.using(settings.DATABASE_REPLICA).filter(subevent=subevent)),
        Prefetch('bundles',
                 queryset=ItemBundle.objects.using(settings.DATABASE_REPLICA).prefetch_related(
                     Prefetch('bundled_item',
                              queryset=event.items.using(settings.DATABASE_REPLICA).select_related('tax_rule').prefetch_related(
                                  Prefetch('quotas',
                                           to_attr='_subevent_quotas',
                                           queryset=event.quotas.using(settings.DATABASE_REPLICA).filter(subevent=subevent)),
                              )),
                     Prefetch('bundled_variation',
                              queryset=ItemVariation.objects.using(
                                  settings.DATABASE_REPLICA
                              ).select_related('item', 'item__tax_rule').filter(item__event=event).prefetch_related(
                                  Prefetch('quotas',
                                           to_attr='_subevent_quotas',
                                           queryset=event.quotas.using(settings.DATABASE_REPLICA).filter(subevent=subevent)),
                              )),
                 )),
        Prefetch('variations', to_attr='available_variations',
                 queryset=ItemVariation.objects.using(settings.DATABASE_REPLICA).annotate(
                     subevent_disabled=Exists(
                         SubEventItemVariation.objects.filter(
                             variation_id=OuterRef('pk'),
                             subevent=subevent,
                             disabled=True,
                         )
                     ),
                 ).filter(
                     active=True, quotas__isnull=False, subevent_disabled=False
                 ).prefetch_related(
                     Prefetch('quotas',
                              to_attr='_subevent_quotas',
                              queryset=event.quotas.using(settings.DATABASE_REPLICA).filter(subevent=subevent))
                 ).distinct()),
    ).annotate(
        quotac=Count('quotas'),
        has_variations=Count('variations'),
        subevent_disabled=Exists(
            SubEventItem.objects.filter(
                item_id=OuterRef('pk'),
                subevent=subevent,
                disabled=True,
            )
        ),
        requires_seat=requires_seat,
    ).filter(
        quotac__gt=0, subevent_disabled=False,
    ).order_by('category__position', 'category_id', 'position', 'name')
    if require_seat:
        items = items.filter(requires_seat__gt=0)
    else:
        items = items.filter(requires_seat=0)
    if filter_items:
        # Filter values come from the query string; keep only numeric IDs.
        items = items.filter(pk__in=[a for a in filter_items if a.isdigit()])
    if filter_categories:
        items = items.filter(category_id__in=[a for a in filter_categories if a.isdigit()])
    display_add_to_cart = False
    quota_cache_key = f'item_quota_cache:{subevent.id if subevent else 0}:{channel}:{bool(require_seat)}'
    quota_cache = quota_cache or event.cache.get(quota_cache_key) or {}
    # If the cache was pre-filled (by the caller or from event.cache), don't
    # write it back at the end — see the condition before event.cache.set().
    quota_cache_existed = bool(quota_cache)
    if subevent:
        item_price_override = subevent.item_price_overrides
        var_price_override = subevent.var_price_overrides
    else:
        item_price_override = {}
        var_price_override = {}
    restrict_vars = set()
    if voucher and voucher.quota_id:
        # If a voucher is set to a specific quota, we need to filter out on that level
        restrict_vars = set(voucher.quota.variations.all())
    # First pass: collect every quota whose availability is not cached yet, so
    # all of them can be computed in one batch below.
    quotas_to_compute = []
    for item in items:
        if item.has_variations:
            for v in item.available_variations:
                for q in v._subevent_quotas:
                    if q.pk not in quota_cache:
                        quotas_to_compute.append(q)
        else:
            for q in item._subevent_quotas:
                if q.pk not in quota_cache:
                    quotas_to_compute.append(q)
    if quotas_to_compute:
        qa = QuotaAvailability()
        qa.queue(*quotas_to_compute)
        qa.compute()
        quota_cache.update({q.pk: r for q, r in qa.results.items()})
    # Second pass: decorate each item (or each of its variations) with
    # availability, order limits and display prices. Items that must not be
    # shown are flagged with `_remove` and filtered out at the end.
    for item in items:
        if voucher and voucher.item_id and voucher.variation_id:
            # Restrict variations if the voucher only allows one
            item.available_variations = [v for v in item.available_variations
                                         if v.pk == voucher.variation_id]
        if get_all_sales_channels()[channel].unlimited_items_per_order:
            max_per_order = sys.maxsize
        else:
            max_per_order = item.max_per_order or int(event.settings.max_items_per_order)
        if item.hidden_if_available:
            # Hide this item while the linked "hidden_if_available" quota is
            # still available (e.g. hide the late-bird price while the
            # early-bird quota is open).
            q = item.hidden_if_available.availability(_cache=quota_cache)
            if q[0] == Quota.AVAILABILITY_OK:
                item._remove = True
                continue
        item.description = str(item.description)
        # Allow plugins to append to the item description.
        for recv, resp in item_description.send(sender=event, item=item, variation=None):
            if resp:
                item.description += ("<br/>" if item.description else "") + resp
        if not item.has_variations:
            item._remove = False
            if not bool(item._subevent_quotas):
                item._remove = True
                continue
            if voucher and (voucher.allow_ignore_quota or voucher.block_quota):
                # Blocked/quota-ignoring vouchers carry their own availability:
                # the remaining number of redemptions.
                item.cached_availability = (
                    Quota.AVAILABILITY_OK, voucher.max_usages - voucher.redeemed
                )
            else:
                item.cached_availability = list(
                    item.check_quotas(subevent=subevent, _cache=quota_cache, include_bundled=True)
                )
            if event.settings.hide_sold_out and item.cached_availability[0] < Quota.AVAILABILITY_RESERVED:
                item._remove = True
                continue
            item.order_max = min(
                item.cached_availability[1]
                if item.cached_availability[1] is not None else sys.maxsize,
                max_per_order
            )
            original_price = item_price_override.get(item.pk, item.default_price)
            if voucher:
                price = voucher.calculate_price(original_price)
            else:
                price = original_price
            item.display_price = item.tax(price, currency=event.currency, include_bundled=True)
            if price != original_price:
                # A voucher discounted the price: show the undiscounted price
                # as the crossed-out "original price".
                item.original_price = item.tax(original_price, currency=event.currency, include_bundled=True)
            else:
                item.original_price = (
                    item.tax(item.original_price, currency=event.currency, include_bundled=True,
                             base_price_is='net' if event.settings.display_net_prices else 'gross')  # backwards-compat
                    if item.original_price else None
                )
            display_add_to_cart = display_add_to_cart or item.order_max > 0
        else:
            # Item with variations: the same decoration, per variation.
            for var in item.available_variations:
                var.description = str(var.description)
                for recv, resp in item_description.send(sender=event, item=item, variation=var):
                    if resp:
                        var.description += ("<br/>" if var.description else "") + resp
                if voucher and (voucher.allow_ignore_quota or voucher.block_quota):
                    var.cached_availability = (
                        Quota.AVAILABILITY_OK, voucher.max_usages - voucher.redeemed
                    )
                else:
                    var.cached_availability = list(
                        var.check_quotas(subevent=subevent, _cache=quota_cache, include_bundled=True)
                    )
                var.order_max = min(
                    var.cached_availability[1]
                    if var.cached_availability[1] is not None else sys.maxsize,
                    max_per_order
                )
                original_price = var_price_override.get(var.pk, var.price)
                if voucher:
                    price = voucher.calculate_price(original_price)
                else:
                    price = original_price
                var.display_price = var.tax(price, currency=event.currency, include_bundled=True)
                if price != original_price:
                    var.original_price = var.tax(original_price, currency=event.currency, include_bundled=True)
                else:
                    # Variation-level original price falls back to the item's.
                    var.original_price = (
                        var.tax(var.original_price or item.original_price, currency=event.currency,
                                include_bundled=True,
                                base_price_is='net' if event.settings.display_net_prices else 'gross')  # backwards-compat
                    ) if var.original_price or item.original_price else None
                display_add_to_cart = display_add_to_cart or var.order_max > 0
            item.original_price = (
                item.tax(item.original_price, currency=event.currency, include_bundled=True,
                         base_price_is='net' if event.settings.display_net_prices else 'gross')  # backwards-compat
                if item.original_price else None
            )
            # Drop variations without quotas for this subevent and, for
            # quota-bound vouchers, variations outside the voucher's quota.
            item.available_variations = [
                v for v in item.available_variations if v._subevent_quotas and (
                    not voucher or not voucher.quota_id or v in restrict_vars
                )
            ]
            if event.settings.hide_sold_out:
                item.available_variations = [v for v in item.available_variations
                                             if v.cached_availability[0] >= Quota.AVAILABILITY_RESERVED]
            if voucher and voucher.variation_id:
                item.available_variations = [v for v in item.available_variations
                                             if v.pk == voucher.variation_id]
            if len(item.available_variations) > 0:
                item.min_price = min([v.display_price.net if event.settings.display_net_prices else
                                      v.display_price.gross for v in item.available_variations])
                item.max_price = max([v.display_price.net if event.settings.display_net_prices else
                                      v.display_price.gross for v in item.available_variations])
            item._remove = not bool(item.available_variations)
    # Only cache the generic, unfiltered result (short TTL: 5 seconds).
    if not quota_cache_existed and not voucher and not allow_addons and not base_qs_set and not filter_items and not filter_categories:
        event.cache.set(quota_cache_key, quota_cache, 5)
    items = [item for item in items
             if (len(item.available_variations) > 0 or not item.has_variations) and not item._remove]
    return items, display_add_to_cart
@method_decorator(allow_frame_if_namespaced, 'dispatch')
@method_decorator(iframe_entry_view_wrapper, 'dispatch')
class EventIndex(EventViewMixin, EventListMixin, CartMixin, TemplateView):
    """Event front page.

    For a plain event (or a selected date of an event series) this renders the
    grouped product list; for an event series without a selected date it
    renders the date list/calendar instead.
    """
    template_name = "pretixpresale/event/index.html"

    def get(self, request, *args, **kwargs):
        """Handle widget entry modes and subevent selection before rendering."""
        from pretix.presale.views.cart import get_or_create_cart_id
        self.subevent = None
        if request.GET.get('src', '') == 'widget' and 'take_cart_id' in request.GET:
            # User has clicked "Open in a new tab" link in widget
            get_or_create_cart_id(request)
            return redirect(eventreverse(request.event, 'presale:event.index', kwargs=kwargs))
        elif request.GET.get('iframe', '') == '1' and 'take_cart_id' in request.GET:
            # Widget just opened, a cart already exists. Let's to a stupid redirect to check if cookies are disabled
            get_or_create_cart_id(request)
            return redirect(eventreverse(request.event, 'presale:event.index', kwargs=kwargs) + '?require_cookie=true&cart_id={}'.format(
                request.GET.get('take_cart_id')
            ))
        elif request.GET.get('iframe', '') == '1' and len(self.request.GET.get('widget_data', '{}')) > 3:
            # We've been passed data from a widget, we need to create a cart session to store it.
            get_or_create_cart_id(request)
        elif 'require_cookie' in request.GET and settings.SESSION_COOKIE_NAME not in request.COOKIES:
            # Cookies are in fact not supported
            r = render(request, 'pretixpresale/event/cookies.html', {
                'url': eventreverse(
                    request.event, "presale:event.index", kwargs={'cart_namespace': kwargs.get('cart_namespace') or ''}
                ) + (
                    "?src=widget&take_cart_id={}".format(request.GET.get('cart_id'))
                    if "cart_id" in request.GET else ""
                )
            })
            r._csp_ignore = True
            return r
        if request.sales_channel.identifier not in request.event.sales_channels:
            raise Http404(_('Tickets for this event cannot be purchased on this sales channel.'))
        if request.event.has_subevents:
            if 'subevent' in kwargs:
                self.subevent = request.event.subevents.using(settings.DATABASE_REPLICA).filter(pk=kwargs['subevent'], active=True).first()
                if not self.subevent:
                    raise Http404()
                return super().get(request, *args, **kwargs)
            else:
                return super().get(request, *args, **kwargs)
        else:
            if 'subevent' in kwargs:
                # Subevent URLs make no sense for non-series events.
                return redirect(self.get_index_url())
            else:
                return super().get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        """Assemble the product list (or subevent list) template context."""
        context = super().get_context_data(**kwargs)
        # Show voucher option if an event is selected and vouchers exist
        vouchers_exist = self.request.event.cache.get('vouchers_exist')
        if vouchers_exist is None:
            vouchers_exist = self.request.event.vouchers.exists()
            self.request.event.cache.set('vouchers_exist', vouchers_exist)
        if not self.request.event.has_subevents or self.subevent:
            # Fetch all items
            items, display_add_to_cart = get_grouped_items(
                self.request.event, self.subevent,
                filter_items=self.request.GET.getlist('item'),
                filter_categories=self.request.GET.getlist('category'),
                channel=self.request.sales_channel.identifier
            )
            context['itemnum'] = len(items)
            context['allfree'] = all(
                item.display_price.gross == Decimal('0.00') for item in items if not item.has_variations
            ) and all(
                all(
                    var.display_price.gross == Decimal('0.00')
                    for var in item.available_variations
                )
                for item in items if item.has_variations
            )
            # Regroup those by category
            context['items_by_category'] = item_group_by_category(items)
            context['display_add_to_cart'] = display_add_to_cart
            context['show_vouchers'] = vouchers_exist
            context['vouchers_exist'] = vouchers_exist
        else:
            context['show_vouchers'] = False
            context['vouchers_exist'] = vouchers_exist
        context['ev'] = self.subevent or self.request.event
        context['subevent'] = self.subevent
        context['cart'] = self.get_cart()
        # BUGFIX: the original line ended in a stray backslash, joining the
        # following `if` statement into this expression (a SyntaxError).
        context['has_addon_choices'] = any(cp.has_addon_choices for cp in get_cart(self.request))
        if self.subevent:
            context['frontpage_text'] = str(self.subevent.frontpage_text)
        else:
            context['frontpage_text'] = str(self.request.event.settings.frontpage_text)
        if self.request.event.has_subevents:
            context.update(self._subevent_list_context())
        context['show_cart'] = (
            context['cart']['positions'] and (
                self.request.event.has_subevents or self.request.event.presale_is_running
            )
        )
        if self.request.event.settings.redirect_to_checkout_directly:
            context['cart_redirect'] = eventreverse(self.request.event, 'presale:event.checkout.start',
                                                    kwargs={'cart_namespace': kwargs.get('cart_namespace') or ''})
            if context['cart_redirect'].startswith('https:'):
                # Strip scheme and host so only the absolute path remains.
                context['cart_redirect'] = '/' + context['cart_redirect'].split('/', 3)[3]
        else:
            context['cart_redirect'] = self.request.path
        return context

    def _subevent_list_context(self):
        """Context for the date list of an event series (list, week or month view)."""
        context = {}
        context['list_type'] = self.request.GET.get("style", self.request.event.settings.event_list_type)
        if context['list_type'] not in ("calendar", "week") and self.request.event.subevents.filter(date_from__gt=now()).count() > 50:
            # Many future dates: fall back to (and persist) the calendar view.
            if self.request.event.settings.event_list_type not in ("calendar", "week"):
                self.request.event.settings.event_list_type = "calendar"
            context['list_type'] = "calendar"
        if context['list_type'] == "calendar":
            self._set_month_year()
            tz = pytz.timezone(self.request.event.settings.timezone)
            _, ndays = calendar.monthrange(self.year, self.month)
            before = datetime(self.year, self.month, 1, 0, 0, 0, tzinfo=tz) - timedelta(days=1)
            after = datetime(self.year, self.month, ndays, 0, 0, 0, tzinfo=tz) + timedelta(days=1)
            context['date'] = date(self.year, self.month, 1)
            context['before'] = before
            context['after'] = after
            ebd = defaultdict(list)
            add_subevents_for_days(
                filter_qs_by_attr(self.request.event.subevents_annotated(self.request.sales_channel.identifier).using(settings.DATABASE_REPLICA), self.request),
                before, after, ebd, set(), self.request.event,
                self.kwargs.get('cart_namespace')
            )
            context['show_names'] = ebd.get('_subevents_different_names', False) or sum(
                len(i) for i in ebd.values() if isinstance(i, list)
            ) < 2
            context['weeks'] = weeks_for_template(ebd, self.year, self.month)
            context['months'] = [date(self.year, i + 1, 1) for i in range(12)]
            context['years'] = range(now().year - 2, now().year + 3)
        elif context['list_type'] == "week":
            self._set_week_year()
            tz = pytz.timezone(self.request.event.settings.timezone)
            week = isoweek.Week(self.year, self.week)
            before = datetime(
                week.monday().year, week.monday().month, week.monday().day, 0, 0, 0, tzinfo=tz
            ) - timedelta(days=1)
            after = datetime(
                week.sunday().year, week.sunday().month, week.sunday().day, 0, 0, 0, tzinfo=tz
            ) + timedelta(days=1)
            context['date'] = week.monday()
            context['before'] = before
            context['after'] = after
            ebd = defaultdict(list)
            add_subevents_for_days(
                filter_qs_by_attr(self.request.event.subevents_annotated(self.request.sales_channel.identifier).using(settings.DATABASE_REPLICA), self.request),
                before, after, ebd, set(), self.request.event,
                self.kwargs.get('cart_namespace')
            )
            context['show_names'] = ebd.get('_subevents_different_names', False) or sum(
                len(i) for i in ebd.values() if isinstance(i, list)
            ) < 2
            context['days'] = days_for_template(ebd, week)
            context['weeks'] = [
                (date_fromisocalendar(self.year, i + 1, 1), date_fromisocalendar(self.year, i + 1, 7))
                for i in range(53 if date(self.year, 12, 31).isocalendar()[1] == 53 else 52)
            ]
            context['years'] = range(now().year - 2, now().year + 3)
            context['week_format'] = get_format('WEEK_FORMAT')
            if context['week_format'] == 'WEEK_FORMAT':
                # get_format returned the name unchanged: no locale override,
                # use our bundled default.
                context['week_format'] = WEEK_FORMAT
        else:
            context['subevent_list'] = self.request.event.subevents_sorted(
                filter_qs_by_attr(self.request.event.subevents_annotated(self.request.sales_channel.identifier).using(settings.DATABASE_REPLICA), self.request)
            )
        return context
@method_decorator(allow_frame_if_namespaced, 'dispatch')
@method_decorator(iframe_entry_view_wrapper, 'dispatch')
class SeatingPlanView(EventViewMixin, TemplateView):
    """Render the seating plan of an event or of one date of an event series."""
    template_name = "pretixpresale/event/seatingplan.html"

    def get(self, request, *args, **kwargs):
        from pretix.presale.views.cart import get_or_create_cart_id
        self.subevent = None
        params = request.GET
        in_iframe = params.get('iframe', '') == '1'
        if params.get('src', '') == 'widget' and 'take_cart_id' in params:
            # "Open in a new tab" from the widget: adopt the widget's cart first.
            get_or_create_cart_id(request)
            return redirect(eventreverse(request.event, 'presale:event.seatingplan', kwargs=kwargs))
        elif in_iframe and 'take_cart_id' in params:
            # Widget iframe with an existing cart: bounce once to detect
            # whether cookies are disabled.
            get_or_create_cart_id(request)
            target = eventreverse(request.event, 'presale:event.seatingplan', kwargs=kwargs)
            return redirect(target + '?require_cookie=true&cart_id={}'.format(
                params.get('take_cart_id')
            ))
        elif in_iframe and len(self.request.GET.get('widget_data', '{}')) > 3:
            # The widget passed data along; a cart session is needed to keep it.
            get_or_create_cart_id(request)
        if request.event.has_subevents:
            if 'subevent' not in kwargs:
                raise Http404()
            self.subevent = request.event.subevents.using(settings.DATABASE_REPLICA).filter(
                pk=kwargs['subevent'], active=True
            ).first()
            if not self.subevent or not self.subevent.seating_plan:
                raise Http404()
            return super().get(request, *args, **kwargs)
        if 'subevent' in kwargs or not request.event.seating_plan:
            raise Http404()
        return super().get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['subevent'] = self.subevent
        redirect_url = eventreverse(self.request.event, 'presale:event.checkout.start',
                                    kwargs={'cart_namespace': kwargs.get('cart_namespace') or ''})
        if redirect_url.startswith('https:'):
            # Strip scheme and host so only the absolute path remains.
            redirect_url = '/' + redirect_url.split('/', 3)[3]
        context['cart_redirect'] = redirect_url
        return context
class EventIcalDownload(EventViewMixin, View):
    """Serve an event (or a single series date) as a downloadable .ics file."""

    def get(self, request, *args, **kwargs):
        if not self.request.event:
            raise Http404(_('Unknown event code or not authorized to access this event.'))
        subevent = None
        if request.event.has_subevents:
            # Event series: a concrete date must be chosen.
            if 'subevent' not in kwargs:
                raise Http404(pgettext_lazy('subevent', 'No date selected.'))
            subevent = get_object_or_404(SubEvent, event=request.event, pk=kwargs['subevent'], active=True)
        elif 'subevent' in kwargs:
            # Non-series events have no dates to select.
            raise Http404(pgettext_lazy('subevent', 'Unknown date selected.'))
        event = self.request.event
        ical = get_ical([subevent or event])
        response = HttpResponse(ical.serialize(), content_type='text/calendar')
        response['Content-Disposition'] = 'attachment; filename="{}-{}-{}.ics"'.format(
            event.organizer.slug, event.slug, subevent.pk if subevent else '0',
        )
        return response
class EventAuth(View):
    """Taken from a parent session, grant this session access to the event.

    The POST carries a session key; that session must itself hold a parent
    session key under ``pretix_event_access_<pk>`` whose session contains
    ``event_access``. On success the parent key is copied into the current
    session and the user is redirected to the event front page.
    """

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # CSRF-exempt: the POST originates from a cross-site form.
        return super().dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        s = SessionStore(request.POST.get('session'))
        try:
            data = s.load()
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception.
        except Exception:
            raise PermissionDenied(_('Please go back and try again.'))
        parent = data.get('pretix_event_access_{}'.format(request.event.pk))
        sparent = SessionStore(parent)
        try:
            parentdata = sparent.load()
        except Exception:
            raise PermissionDenied(_('Please go back and try again.'))
        else:
            if 'event_access' not in parentdata:
                raise PermissionDenied(_('Please go back and try again.'))
        request.session['pretix_event_access_{}'.format(request.event.pk)] = parent
        return redirect(eventreverse(request.event, 'presale:event.index'))
| 46.400329 | 160 | 0.606994 | import calendar
import sys
from collections import defaultdict
from datetime import date, datetime, timedelta
from decimal import Decimal
from importlib import import_module
import isoweek
import pytz
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.db.models import (
Count, Exists, IntegerField, OuterRef, Prefetch, Value,
)
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.decorators import method_decorator
from django.utils.formats import get_format
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from pretix.base.channels import get_all_sales_channels
from pretix.base.models import ItemVariation, Quota, SeatCategoryMapping
from pretix.base.models.event import SubEvent
from pretix.base.models.items import (
ItemBundle, SubEventItem, SubEventItemVariation,
)
from pretix.base.services.quotas import QuotaAvailability
from pretix.helpers.compat import date_fromisocalendar
from pretix.multidomain.urlreverse import eventreverse
from pretix.presale.ical import get_ical
from pretix.presale.signals import item_description
from pretix.presale.views.organizer import (
EventListMixin, add_subevents_for_days, days_for_template,
filter_qs_by_attr, weeks_for_template,
)
from ...helpers.formats.en.formats import WEEK_FORMAT
from . import (
CartMixin, EventViewMixin, allow_frame_if_namespaced, get_cart,
iframe_entry_view_wrapper,
)
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore
def item_group_by_category(items):
return sorted(
[
(cat, [i for i in items if i.category == cat])
for cat in set([i.category for i in items])
],
key=lambda group: (group[0].position, group[0].id) if (
group[0] is not None and group[0].id is not None) else (0, 0)
)
def get_grouped_items(event, subevent=None, voucher=None, channel='web', require_seat=0, base_qs=None, allow_addons=False,
quota_cache=None, filter_items=None, filter_categories=None):
base_qs_set = base_qs is not None
base_qs = base_qs if base_qs is not None else event.items
requires_seat = Exists(
SeatCategoryMapping.objects.filter(
product_id=OuterRef('pk'),
subevent=subevent
)
)
if not event.settings.seating_choice:
requires_seat = Value(0, output_field=IntegerField())
items = base_qs.using(settings.DATABASE_REPLICA).filter_available(channel=channel, voucher=voucher, allow_addons=allow_addons).select_related(
'category', 'tax_rule',
'hidden_if_available',
).prefetch_related(
Prefetch('quotas',
to_attr='_subevent_quotas',
queryset=event.quotas.using(settings.DATABASE_REPLICA).filter(subevent=subevent)),
Prefetch('bundles',
queryset=ItemBundle.objects.using(settings.DATABASE_REPLICA).prefetch_related(
Prefetch('bundled_item',
queryset=event.items.using(settings.DATABASE_REPLICA).select_related('tax_rule').prefetch_related(
Prefetch('quotas',
to_attr='_subevent_quotas',
queryset=event.quotas.using(settings.DATABASE_REPLICA).filter(subevent=subevent)),
)),
Prefetch('bundled_variation',
queryset=ItemVariation.objects.using(
settings.DATABASE_REPLICA
).select_related('item', 'item__tax_rule').filter(item__event=event).prefetch_related(
Prefetch('quotas',
to_attr='_subevent_quotas',
queryset=event.quotas.using(settings.DATABASE_REPLICA).filter(subevent=subevent)),
)),
)),
Prefetch('variations', to_attr='available_variations',
queryset=ItemVariation.objects.using(settings.DATABASE_REPLICA).annotate(
subevent_disabled=Exists(
SubEventItemVariation.objects.filter(
variation_id=OuterRef('pk'),
subevent=subevent,
disabled=True,
)
),
).filter(
active=True, quotas__isnull=False, subevent_disabled=False
).prefetch_related(
Prefetch('quotas',
to_attr='_subevent_quotas',
queryset=event.quotas.using(settings.DATABASE_REPLICA).filter(subevent=subevent))
).distinct()),
).annotate(
quotac=Count('quotas'),
has_variations=Count('variations'),
subevent_disabled=Exists(
SubEventItem.objects.filter(
item_id=OuterRef('pk'),
subevent=subevent,
disabled=True,
)
),
requires_seat=requires_seat,
).filter(
quotac__gt=0, subevent_disabled=False,
).order_by('category__position', 'category_id', 'position', 'name')
if require_seat:
items = items.filter(requires_seat__gt=0)
else:
items = items.filter(requires_seat=0)
if filter_items:
items = items.filter(pk__in=[a for a in filter_items if a.isdigit()])
if filter_categories:
items = items.filter(category_id__in=[a for a in filter_categories if a.isdigit()])
display_add_to_cart = False
quota_cache_key = f'item_quota_cache:{subevent.id if subevent else 0}:{channel}:{bool(require_seat)}'
quota_cache = quota_cache or event.cache.get(quota_cache_key) or {}
quota_cache_existed = bool(quota_cache)
if subevent:
item_price_override = subevent.item_price_overrides
var_price_override = subevent.var_price_overrides
else:
item_price_override = {}
var_price_override = {}
restrict_vars = set()
if voucher and voucher.quota_id:
restrict_vars = set(voucher.quota.variations.all())
quotas_to_compute = []
for item in items:
if item.has_variations:
for v in item.available_variations:
for q in v._subevent_quotas:
if q.pk not in quota_cache:
quotas_to_compute.append(q)
else:
for q in item._subevent_quotas:
if q.pk not in quota_cache:
quotas_to_compute.append(q)
if quotas_to_compute:
qa = QuotaAvailability()
qa.queue(*quotas_to_compute)
qa.compute()
quota_cache.update({q.pk: r for q, r in qa.results.items()})
for item in items:
if voucher and voucher.item_id and voucher.variation_id:
item.available_variations = [v for v in item.available_variations
if v.pk == voucher.variation_id]
if get_all_sales_channels()[channel].unlimited_items_per_order:
max_per_order = sys.maxsize
else:
max_per_order = item.max_per_order or int(event.settings.max_items_per_order)
if item.hidden_if_available:
q = item.hidden_if_available.availability(_cache=quota_cache)
if q[0] == Quota.AVAILABILITY_OK:
item._remove = True
continue
item.description = str(item.description)
for recv, resp in item_description.send(sender=event, item=item, variation=None):
if resp:
item.description += ("<br/>" if item.description else "") + resp
if not item.has_variations:
item._remove = False
if not bool(item._subevent_quotas):
item._remove = True
continue
if voucher and (voucher.allow_ignore_quota or voucher.block_quota):
item.cached_availability = (
Quota.AVAILABILITY_OK, voucher.max_usages - voucher.redeemed
)
else:
item.cached_availability = list(
item.check_quotas(subevent=subevent, _cache=quota_cache, include_bundled=True)
)
if event.settings.hide_sold_out and item.cached_availability[0] < Quota.AVAILABILITY_RESERVED:
item._remove = True
continue
item.order_max = min(
item.cached_availability[1]
if item.cached_availability[1] is not None else sys.maxsize,
max_per_order
)
original_price = item_price_override.get(item.pk, item.default_price)
if voucher:
price = voucher.calculate_price(original_price)
else:
price = original_price
item.display_price = item.tax(price, currency=event.currency, include_bundled=True)
if price != original_price:
item.original_price = item.tax(original_price, currency=event.currency, include_bundled=True)
else:
item.original_price = (
item.tax(item.original_price, currency=event.currency, include_bundled=True,
base_price_is='net' if event.settings.display_net_prices else 'gross')
if item.original_price else None
)
display_add_to_cart = display_add_to_cart or item.order_max > 0
else:
for var in item.available_variations:
var.description = str(var.description)
for recv, resp in item_description.send(sender=event, item=item, variation=var):
if resp:
var.description += ("<br/>" if var.description else "") + resp
if voucher and (voucher.allow_ignore_quota or voucher.block_quota):
var.cached_availability = (
Quota.AVAILABILITY_OK, voucher.max_usages - voucher.redeemed
)
else:
var.cached_availability = list(
var.check_quotas(subevent=subevent, _cache=quota_cache, include_bundled=True)
)
var.order_max = min(
var.cached_availability[1]
if var.cached_availability[1] is not None else sys.maxsize,
max_per_order
)
original_price = var_price_override.get(var.pk, var.price)
if voucher:
price = voucher.calculate_price(original_price)
else:
price = original_price
var.display_price = var.tax(price, currency=event.currency, include_bundled=True)
if price != original_price:
var.original_price = var.tax(original_price, currency=event.currency, include_bundled=True)
else:
var.original_price = (
var.tax(var.original_price or item.original_price, currency=event.currency,
include_bundled=True,
base_price_is='net' if event.settings.display_net_prices else 'gross')
) if var.original_price or item.original_price else None
display_add_to_cart = display_add_to_cart or var.order_max > 0
item.original_price = (
item.tax(item.original_price, currency=event.currency, include_bundled=True,
base_price_is='net' if event.settings.display_net_prices else 'gross')
if item.original_price else None
)
item.available_variations = [
v for v in item.available_variations if v._subevent_quotas and (
not voucher or not voucher.quota_id or v in restrict_vars
)
]
if event.settings.hide_sold_out:
item.available_variations = [v for v in item.available_variations
if v.cached_availability[0] >= Quota.AVAILABILITY_RESERVED]
if voucher and voucher.variation_id:
item.available_variations = [v for v in item.available_variations
if v.pk == voucher.variation_id]
if len(item.available_variations) > 0:
item.min_price = min([v.display_price.net if event.settings.display_net_prices else
v.display_price.gross for v in item.available_variations])
item.max_price = max([v.display_price.net if event.settings.display_net_prices else
v.display_price.gross for v in item.available_variations])
item._remove = not bool(item.available_variations)
if not quota_cache_existed and not voucher and not allow_addons and not base_qs_set and not filter_items and not filter_categories:
event.cache.set(quota_cache_key, quota_cache, 5)
items = [item for item in items
if (len(item.available_variations) > 0 or not item.has_variations) and not item._remove]
return items, display_add_to_cart
@method_decorator(allow_frame_if_namespaced, 'dispatch')
@method_decorator(iframe_entry_view_wrapper, 'dispatch')
class EventIndex(EventViewMixin, EventListMixin, CartMixin, TemplateView):
    """Event front page.

    Renders the product list for a single event (or a selected subevent) or,
    for event series, a list/calendar/week overview of subevents.  Also
    handles the cart-session hand-over used by the embeddable widget and by
    iframe embedding.
    """
    template_name = "pretixpresale/event/index.html"

    def get(self, request, *args, **kwargs):
        from pretix.presale.views.cart import get_or_create_cart_id
        self.subevent = None
        if request.GET.get('src', '') == 'widget' and 'take_cart_id' in request.GET:
            # Widget hand-over: adopt the cart session, then redirect to a
            # clean URL without the hand-over query parameters.
            get_or_create_cart_id(request)
            return redirect(eventreverse(request.event, 'presale:event.index', kwargs=kwargs))
        elif request.GET.get('iframe', '') == '1' and 'take_cart_id' in request.GET:
            # Iframe hand-over: redirect to a URL that additionally verifies
            # that the browser accepts cookies inside the iframe.
            get_or_create_cart_id(request)
            return redirect(eventreverse(request.event, 'presale:event.index', kwargs=kwargs) + '?require_cookie=true&cart_id={}'.format(
                request.GET.get('take_cart_id')
            ))
        elif request.GET.get('iframe', '') == '1' and len(self.request.GET.get('widget_data', '{}')) > 3:
            # We've been passed data from a widget, we need to create a cart session to store it.
            get_or_create_cart_id(request)
        elif 'require_cookie' in request.GET and settings.SESSION_COOKIE_NAME not in request.COOKIES:
            # The session cookie did not survive the redirect -- cookies are
            # blocked, so show an explanation page instead of the shop.
            r = render(request, 'pretixpresale/event/cookies.html', {
                'url': eventreverse(
                    request.event, "presale:event.index", kwargs={'cart_namespace': kwargs.get('cart_namespace') or ''}
                ) + (
                    "?src=widget&take_cart_id={}".format(request.GET.get('cart_id'))
                    if "cart_id" in request.GET else ""
                )
            })
            r._csp_ignore = True
            return r
        if request.sales_channel.identifier not in request.event.sales_channels:
            raise Http404(_('Tickets for this event cannot be purchased on this sales channel.'))
        if request.event.has_subevents:
            if 'subevent' in kwargs:
                self.subevent = request.event.subevents.using(settings.DATABASE_REPLICA).filter(pk=kwargs['subevent'], active=True).first()
                if not self.subevent:
                    raise Http404()
                return super().get(request, *args, **kwargs)
            else:
                return super().get(request, *args, **kwargs)
        else:
            if 'subevent' in kwargs:
                # Single events have no subevent URLs -- go back to the index.
                return redirect(self.get_index_url())
            else:
                return super().get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Cache the frequent "does this event have any vouchers" lookup.
        vouchers_exist = self.request.event.cache.get('vouchers_exist')
        if vouchers_exist is None:
            vouchers_exist = self.request.event.vouchers.exists()
            self.request.event.cache.set('vouchers_exist', vouchers_exist)
        if not self.request.event.has_subevents or self.subevent:
            # Product list for a single event or a concrete subevent.
            items, display_add_to_cart = get_grouped_items(
                self.request.event, self.subevent,
                filter_items=self.request.GET.getlist('item'),
                filter_categories=self.request.GET.getlist('category'),
                channel=self.request.sales_channel.identifier
            )
            context['itemnum'] = len(items)
            # True if every product (and every variation) costs 0.00.
            context['allfree'] = all(
                item.display_price.gross == Decimal('0.00') for item in items if not item.has_variations
            ) and all(
                all(
                    var.display_price.gross == Decimal('0.00')
                    for var in item.available_variations
                )
                for item in items if item.has_variations
            )
            context['items_by_category'] = item_group_by_category(items)
            context['display_add_to_cart'] = display_add_to_cart
            context['show_vouchers'] = vouchers_exist
            context['vouchers_exist'] = vouchers_exist
        else:
            context['show_vouchers'] = False
            context['vouchers_exist'] = vouchers_exist
        context['ev'] = self.subevent or self.request.event
        context['subevent'] = self.subevent
        context['cart'] = self.get_cart()
        # BUG FIX: this line previously ended in a stray line-continuation
        # backslash that glued it to the following ``if`` statement, which is
        # a SyntaxError.  The backslash has been removed.
        context['has_addon_choices'] = any(cp.has_addon_choices for cp in get_cart(self.request))
        if self.subevent:
            context['frontpage_text'] = str(self.subevent.frontpage_text)
        else:
            context['frontpage_text'] = str(self.request.event.settings.frontpage_text)
        if self.request.event.has_subevents:
            context.update(self._subevent_list_context())
        context['show_cart'] = (
            context['cart']['positions'] and (
                self.request.event.has_subevents or self.request.event.presale_is_running
            )
        )
        if self.request.event.settings.redirect_to_checkout_directly:
            context['cart_redirect'] = eventreverse(self.request.event, 'presale:event.checkout.start',
                                                    kwargs={'cart_namespace': kwargs.get('cart_namespace') or ''})
            if context['cart_redirect'].startswith('https:'):
                # Strip scheme and host so the redirect also works when the
                # shop is embedded over plain HTTP.
                context['cart_redirect'] = '/' + context['cart_redirect'].split('/', 3)[3]
        else:
            context['cart_redirect'] = self.request.path
        return context

    def _subevent_list_context(self):
        """Build the template context for the subevent overview.

        Depending on the ``style`` query parameter / event setting this is a
        month calendar, a week view, or a flat sorted list.
        """
        context = {}
        context['list_type'] = self.request.GET.get("style", self.request.event.settings.event_list_type)
        # Large series default to the calendar view to keep the page usable.
        if context['list_type'] not in ("calendar", "week") and self.request.event.subevents.filter(date_from__gt=now()).count() > 50:
            if self.request.event.settings.event_list_type not in ("calendar", "week"):
                self.request.event.settings.event_list_type = "calendar"
            context['list_type'] = "calendar"
        if context['list_type'] == "calendar":
            self._set_month_year()
            tz = pytz.timezone(self.request.event.settings.timezone)
            _, ndays = calendar.monthrange(self.year, self.month)
            # Pad by one day on each side so edge days of adjacent months show.
            before = datetime(self.year, self.month, 1, 0, 0, 0, tzinfo=tz) - timedelta(days=1)
            after = datetime(self.year, self.month, ndays, 0, 0, 0, tzinfo=tz) + timedelta(days=1)
            context['date'] = date(self.year, self.month, 1)
            context['before'] = before
            context['after'] = after
            ebd = defaultdict(list)
            add_subevents_for_days(
                filter_qs_by_attr(self.request.event.subevents_annotated(self.request.sales_channel.identifier).using(settings.DATABASE_REPLICA), self.request),
                before, after, ebd, set(), self.request.event,
                self.kwargs.get('cart_namespace')
            )
            # Show names if they differ between subevents or there are <2 events.
            context['show_names'] = ebd.get('_subevents_different_names', False) or sum(
                len(i) for i in ebd.values() if isinstance(i, list)
            ) < 2
            context['weeks'] = weeks_for_template(ebd, self.year, self.month)
            context['months'] = [date(self.year, i + 1, 1) for i in range(12)]
            context['years'] = range(now().year - 2, now().year + 3)
        elif context['list_type'] == "week":
            self._set_week_year()
            tz = pytz.timezone(self.request.event.settings.timezone)
            week = isoweek.Week(self.year, self.week)
            before = datetime(
                week.monday().year, week.monday().month, week.monday().day, 0, 0, 0, tzinfo=tz
            ) - timedelta(days=1)
            after = datetime(
                week.sunday().year, week.sunday().month, week.sunday().day, 0, 0, 0, tzinfo=tz
            ) + timedelta(days=1)
            context['date'] = week.monday()
            context['before'] = before
            context['after'] = after
            ebd = defaultdict(list)
            add_subevents_for_days(
                filter_qs_by_attr(self.request.event.subevents_annotated(self.request.sales_channel.identifier).using(settings.DATABASE_REPLICA), self.request),
                before, after, ebd, set(), self.request.event,
                self.kwargs.get('cart_namespace')
            )
            context['show_names'] = ebd.get('_subevents_different_names', False) or sum(
                len(i) for i in ebd.values() if isinstance(i, list)
            ) < 2
            context['days'] = days_for_template(ebd, week)
            context['weeks'] = [
                (date_fromisocalendar(self.year, i + 1, 1), date_fromisocalendar(self.year, i + 1, 7))
                for i in range(53 if date(self.year, 12, 31).isocalendar()[1] == 53 else 52)
            ]
            context['years'] = range(now().year - 2, now().year + 3)
            context['week_format'] = get_format('WEEK_FORMAT')
            if context['week_format'] == 'WEEK_FORMAT':
                context['week_format'] = WEEK_FORMAT
        else:
            context['subevent_list'] = self.request.event.subevents_sorted(
                filter_qs_by_attr(self.request.event.subevents_annotated(self.request.sales_channel.identifier).using(settings.DATABASE_REPLICA), self.request)
            )
        return context
@method_decorator(allow_frame_if_namespaced, 'dispatch')
@method_decorator(iframe_entry_view_wrapper, 'dispatch')
class SeatingPlanView(EventViewMixin, TemplateView):
    """Render the interactive seating plan for an event or subevent.

    Handles the same widget/iframe cart-session hand-over as ``EventIndex``
    before displaying the plan; 404s if no seating plan is configured.
    """
    template_name = "pretixpresale/event/seatingplan.html"
    def get(self, request, *args, **kwargs):
        from pretix.presale.views.cart import get_or_create_cart_id
        self.subevent = None
        # Widget hand-over: adopt the cart session, redirect to a clean URL.
        if request.GET.get('src', '') == 'widget' and 'take_cart_id' in request.GET:
            get_or_create_cart_id(request)
            return redirect(eventreverse(request.event, 'presale:event.seatingplan', kwargs=kwargs))
        # Iframe hand-over: redirect to a URL that also checks cookie support.
        elif request.GET.get('iframe', '') == '1' and 'take_cart_id' in request.GET:
            get_or_create_cart_id(request)
            return redirect(eventreverse(request.event, 'presale:event.seatingplan', kwargs=kwargs) + '?require_cookie=true&cart_id={}'.format(
                request.GET.get('take_cart_id')
            ))
        elif request.GET.get('iframe', '') == '1' and len(self.request.GET.get('widget_data', '{}')) > 3:
            # We've been passed data from a widget, we need to create a cart session to store it.
            get_or_create_cart_id(request)
        if request.event.has_subevents:
            # Series events: a subevent must be selected and have a seating plan.
            if 'subevent' in kwargs:
                self.subevent = request.event.subevents.using(settings.DATABASE_REPLICA).filter(pk=kwargs['subevent'], active=True).first()
                if not self.subevent or not self.subevent.seating_plan:
                    raise Http404()
                return super().get(request, *args, **kwargs)
            else:
                raise Http404()
        else:
            # Single events: reject subevent URLs and require a seating plan.
            if 'subevent' in kwargs or not request.event.seating_plan:
                raise Http404()
            else:
                return super().get(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['subevent'] = self.subevent
        context['cart_redirect'] = eventreverse(self.request.event, 'presale:event.checkout.start',
                                                kwargs={'cart_namespace': kwargs.get('cart_namespace') or ''})
        if context['cart_redirect'].startswith('https:'):
            # Strip scheme and host so the redirect works inside http iframes.
            context['cart_redirect'] = '/' + context['cart_redirect'].split('/', 3)[3]
        return context
class EventIcalDownload(EventViewMixin, View):
    """Serve the event (or a selected subevent) as a downloadable .ics file."""
    def get(self, request, *args, **kwargs):
        if not self.request.event:
            raise Http404(_('Unknown event code or not authorized to access this event.'))
        subevent = None
        if request.event.has_subevents:
            # Series events: a concrete, active subevent must be selected.
            if 'subevent' in kwargs:
                subevent = get_object_or_404(SubEvent, event=request.event, pk=kwargs['subevent'], active=True)
            else:
                raise Http404(pgettext_lazy('subevent', 'No date selected.'))
        else:
            # Single events must not be accessed through a subevent URL.
            if 'subevent' in kwargs:
                raise Http404(pgettext_lazy('subevent', 'Unknown date selected.'))
        event = self.request.event
        cal = get_ical([subevent or event])
        resp = HttpResponse(cal.serialize(), content_type='text/calendar')
        # '0' marks the no-subevent case in the filename.
        resp['Content-Disposition'] = 'attachment; filename="{}-{}-{}.ics"'.format(
            event.organizer.slug, event.slug, subevent.pk if subevent else '0',
        )
        return resp
class EventAuth(View):
    """Transfer event-access authorization from a parent session into the
    current browser session.

    The widget/iframe flow POSTs a session key; we verify that the referenced
    parent session really contains ``event_access`` before trusting it.
    """
    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # CSRF is disabled because the POST originates from another origin
        # (the embedding page); trust is established via the session keys.
        return super().dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        s = SessionStore(request.POST.get('session'))
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        try:
            data = s.load()
        except Exception:
            raise PermissionDenied(_('Please go back and try again.'))
        parent = data.get('pretix_event_access_{}'.format(request.event.pk))
        sparent = SessionStore(parent)
        try:
            parentdata = sparent.load()
        except Exception:
            raise PermissionDenied(_('Please go back and try again.'))
        else:
            if 'event_access' not in parentdata:
                raise PermissionDenied(_('Please go back and try again.'))
        # Authorization confirmed -- copy the grant into this session.
        request.session['pretix_event_access_{}'.format(request.event.pk)] = parent
        return redirect(eventreverse(request.event, 'presale:event.index'))
| true | true |
f728752e4a893f2f382eb0826510c850d1a5bf78 | 640 | py | Python | setup.py | ucsb-pstat/jupyter-rsession-proxy | 47552352ff19e9dc015ae2da50ae9b2eeec89b54 | [
"BSD-3-Clause"
] | 1 | 2020-01-31T09:54:06.000Z | 2020-01-31T09:54:06.000Z | setup.py | scivm/jupyter-rsession-proxy | 8d1bbd0254944c35686198d3ce03ab1776c48a29 | [
"BSD-3-Clause"
] | null | null | null | setup.py | scivm/jupyter-rsession-proxy | 8d1bbd0254944c35686198d3ce03ab1776c48a29 | [
"BSD-3-Clause"
] | null | null | null | import setuptools
# Packaging metadata for the jupyter-rsession-proxy server extension.
setuptools.setup(
    name="jupyter-rsession-proxy",
    version='1.0dev',
    url="https://github.com/jupyterhub/jupyter-rsession-proxy",
    author="Ryan Lovett & Yuvi Panda",
    description="Jupyter extension to proxy RStudio's rsession",
    packages=setuptools.find_packages(),
    keywords=['Jupyter'],
    classifiers=['Framework :: Jupyter'],
    # Runtime dependency: generic Jupyter server proxying machinery.
    install_requires=[
        'jupyter-server-proxy'
    ],
    # Register the RStudio proxy so jupyter-server-proxy discovers it.
    entry_points={
        'jupyter_serverproxy_servers': [
            'rstudio = jupyter_rsession_proxy:setup_rstudio',
        ]
    },
    # Ship the bundled launcher icons with the package.
    package_data={
        'jupyter_rsession_proxy': ['icons/*'],
    },
)
| 26.666667 | 64 | 0.65 | import setuptools
setuptools.setup(
name="jupyter-rsession-proxy",
version='1.0dev',
url="https://github.com/jupyterhub/jupyter-rsession-proxy",
author="Ryan Lovett & Yuvi Panda",
description="Jupyter extension to proxy RStudio's rsession",
packages=setuptools.find_packages(),
keywords=['Jupyter'],
classifiers=['Framework :: Jupyter'],
install_requires=[
'jupyter-server-proxy'
],
entry_points={
'jupyter_serverproxy_servers': [
'rstudio = jupyter_rsession_proxy:setup_rstudio',
]
},
package_data={
'jupyter_rsession_proxy': ['icons/*'],
},
)
| true | true |
f7287564eb28c49e89edff884c44b57881472382 | 901 | py | Python | k8spackage/commands/version.py | ant31/k8spackage-crd | dd79657389ead5e153a552bfb380f3915f7943d3 | [
"Apache-2.0"
] | null | null | null | k8spackage/commands/version.py | ant31/k8spackage-crd | dd79657389ead5e153a552bfb380f3915f7943d3 | [
"Apache-2.0"
] | null | null | null | k8spackage/commands/version.py | ant31/k8spackage-crd | dd79657389ead5e153a552bfb380f3915f7943d3 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function
import k8spackage
from k8spackage.commands.command_base import CommandBase
class VersionCmd(CommandBase):
    """Command that reports the installed k8spackage client version."""
    name = 'version'
    help_message = "show version"

    def __init__(self, options):
        super(VersionCmd, self).__init__(options)
        # Populated by _call(); stays None until the command has run.
        self.client_version = None

    def _cli_version(self):
        """Return the version string of the installed package."""
        return k8spackage.__version__

    def _version(self):
        """Return the version information as a dict."""
        return {"client-version": self._cli_version()}

    @classmethod
    def _add_arguments(cls, parser):
        cls._add_output_option(parser)

    def _call(self):
        info = self._version()
        self.client_version = info['client-version']

    def _render_dict(self):
        return {"client-version": self.client_version}

    def _render_console(self):
        # Single line of console output.
        return "Client-version: %s" % self.client_version
import k8spackage
from k8spackage.commands.command_base import CommandBase
class VersionCmd(CommandBase):
name = 'version'
help_message = "show version"
def __init__(self, options):
super(VersionCmd, self).__init__(options)
self.client_version = None
def _cli_version(self):
return k8spackage.__version__
def _version(self):
return {"client-version": self._cli_version()}
@classmethod
def _add_arguments(cls, parser):
cls._add_output_option(parser)
def _call(self):
version = self._version()
self.client_version = version['client-version']
def _render_dict(self):
return {"client-version": self.client_version}
def _render_console(self):
return "\n".join([
"Client-version: %s" % self.client_version])
| true | true |
f72876b7d8eb365c8c41991b4115489692254a89 | 8,591 | py | Python | openclean_pattern/utils/utils.py | VIDA-NYU/openclean-pattern | 8d8a94691f9bfa5dcf8773b08ceb8e562fce52df | [
"BSD-3-Clause"
] | 4 | 2021-04-20T09:05:51.000Z | 2022-01-28T14:13:37.000Z | openclean_pattern/utils/utils.py | VIDA-NYU/openclean-pattern | 8d8a94691f9bfa5dcf8773b08ceb8e562fce52df | [
"BSD-3-Clause"
] | 1 | 2021-04-09T08:49:33.000Z | 2021-04-09T08:49:33.000Z | openclean_pattern/utils/utils.py | VIDA-NYU/openclean-pattern | 8d8a94691f9bfa5dcf8773b08ceb8e562fce52df | [
"BSD-3-Clause"
] | null | null | null | # This file is part of the Pattern and Anomaly Detection Library (openclean_pattern).
#
# Copyright (C) 2021 New York University.
#
# openclean_pattern is released under the Revised BSD License. See file LICENSE for
# full license details.
"""A collection of useful utility methods"""
import re
from abc import ABCMeta, abstractmethod
import random
import bisect
from collections import Counter
# -- Comparators --------------------------------------------------------------
class Comparator(metaclass=ABCMeta):
    """Abstract base class for objects that compare two data items."""

    @abstractmethod
    def compare(self, a, b, meta=None):
        """Return True if ``a`` and ``b`` are considered equal.

        The comparison may take any extra meta information into account.

        Parameters
        ----------
        a : Any
            First item to compare.
        b : Any
            Second item to compare against.
        meta : Any, optional
            Extra information used in the comparison.

        Returns
        -------
        bool
        """
        raise NotImplementedError()


class StringComparator(Comparator):
    """Collection of useful string-comparison helpers."""

    @staticmethod
    def compare_strings(s1, s2, ambiguous_char='X'):
        """Merge two strings position by position.

        Positions where the characters match keep that character; differing
        positions -- including the tail where one string is longer -- become
        ``ambiguous_char``.  Returns the merged string together with the
        fraction of ambiguous characters it contains.

        Parameters
        ----------
        s1 : str
        s2 : str
        ambiguous_char : str
            Replacement for distinct characters.

        Returns
        -------
        str, float
        """
        overlap = min(len(s1), len(s2))
        merged_chars = []
        for idx in range(overlap):
            merged_chars.append(s1[idx] if s1[idx] == s2[idx] else ambiguous_char)
        # The length mismatch at the tail is entirely ambiguous.
        merged_chars.extend(ambiguous_char * abs(len(s1) - len(s2)))
        merged = ''.join(merged_chars)
        ambiguity = merged.count(ambiguous_char) / len(merged) if merged else 0
        return merged, ambiguity

    @staticmethod
    def substring_finder(string1, string2):
        """Collect aligned common runs of ``string2`` found inside ``string1``.

        For every offset into ``string1``, characters of ``string2`` are
        matched in lockstep; runs that break off mid-scan are only kept when
        longer than one character, while a run still open at the end of the
        scan is kept regardless of length.
        """
        found = []
        len1, len2 = len(string1), len(string2)
        for offset in range(len1):
            run = ""
            for pos in range(len2):
                if offset + pos < len1 and string1[offset + pos] == string2[pos]:
                    run += string2[pos]
                else:
                    # Flush the current run; single characters are discarded here.
                    if len(run) > 1:
                        found.append(run)
                    run = ""
            if run != '':
                found.append(run)
        return found
def has_numbers(inputString):
    """Return True if *inputString* contains at least one decimal digit."""
    return re.search(r'\d', inputString) is not None
# -- Samplers -----------------------------------------------------------------
class Sampler(metaclass=ABCMeta):
    """Base class for sampling strategies over an iterable.

    Exists because ``pandas.sample`` operates on DataFrames and can be slow.
    """

    def __init__(self, iterable, n=1):
        """Store the data source and the requested sample size.

        Parameters
        ----------
        iterable : Iterable
            The data to sample from.
        n : float
            Number of records to draw, or -- whenever ``0 <= n <= 1`` -- the
            fraction of the population to draw.
        """
        self.iterable = iterable
        self.n = n
        # True when n should be interpreted as a fraction of the population.
        self.frac = 0 <= n <= 1

    @abstractmethod
    def __call__(self, *args, **kwargs):
        """Draw and return the sample."""
        raise NotImplementedError()

    def sample(self):
        """Convenience alias for invoking the sampler."""
        return self.__call__()


class WeightedRandomSampler(Sampler):
    """Weighted random sampling driven by a ``collections.Counter``.

    Based on the work: https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/

    Note: when a Counter/dict of ``{value: frequency}`` is supplied there is
    no row-index information attached to the sampled series, which may force
    an extra lookup during anomaly detection.
    """

    def __init__(self, weights, n=1, random_state=None):
        """Build the cumulative-frequency table used for weighted draws.

        Parameters
        ----------
        weights : collections.Counter
            Mapping of value -> frequency.
        n : float
            Number (or fraction) of records to sample.
        random_state : int, optional
            Seed for the pseudo random number generator.
        """
        super(WeightedRandomSampler, self).__init__(weights, n)
        self.random_state = random_state
        # Cumulative sums of the frequencies, consumed by next() via bisection.
        self.totals = []
        running = 0
        for frequency in weights.values():
            running += frequency
            self.totals.append(running)

    def next(self):
        """Pick one index at random, weighted by the stored distribution."""
        point = random.random() * self.totals[-1]
        return bisect.bisect_right(self.totals, point)

    def __call__(self):
        """Draw n samples (or n * population when n is a fraction).

        Returns
        -------
        sampled list of rows
        """
        drawn = Counter()
        count = int(self.totals[-1] * self.n) if self.frac else int(self.n)
        keys = list(self.iterable.keys())
        random.seed(self.random_state)
        for _ in range(count):
            drawn[keys[self.next()]] += 1
        return WeightedRandomSampler.counter_to_list(drawn)

    @staticmethod
    def counter_to_list(counter):
        """Expand a Counter into a flat list, repeating each key by its count."""
        expanded = []
        for value, frequency in counter.items():
            expanded.extend([value] * frequency)
        return expanded


class RandomSampler(Sampler):
    """Uniform random sampling of an iterable (faster than pandas.sample).

    Note: when a Counter/dict of ``{value: frequency}`` is supplied there is
    no row-index information attached to the sampled series, which may force
    an extra lookup during anomaly detection.
    """

    def __init__(self, iterable, n=1, random_state=None):
        """Store the source, sample size and RNG seed.

        Parameters
        ----------
        iterable : Iterable
            The data to sample from.
        n : float
            Number (or fraction) of records to sample.
        random_state : int, optional
            Seed for the pseudo random number generator.
        """
        super(RandomSampler, self).__init__(iterable, n)
        self.random_state = random_state

    def __call__(self, *args, **kwargs):
        """Draw a uniform random sample without replacement.

        Returns
        -------
        sampled list of rows
        """
        random.seed(self.random_state)
        count = int(len(self.iterable) * self.n) if self.frac else int(self.n)
        return random.sample(self.iterable, count)


class Distinct(Sampler):
    """Sampler that simply returns the distinct values of the iterable."""

    def __init__(self, iterable):
        """Store the data source (sample size is irrelevant here)."""
        super(Distinct, self).__init__(iterable, 1)

    def __call__(self, *args, **kwargs):
        """Return the distinct rows of the input.

        Returns
        -------
        distinct list of rows
        """
        return list(set(self.iterable))
# -- Helper methods -----------------------------------------------------------------
def list_contains_list(o, tree_types=list):
    """Return True if *o* is not a list, or is a list directly containing
    another list; False for a flat list with no nested lists."""
    if not isinstance(o, tree_types):
        # Values that aren't lists themselves are simply ignored (treated as True).
        return True
    return any(isinstance(element, tree_types) for element in o)
import re
from abc import ABCMeta, abstractmethod
import random
import bisect
from collections import Counter
class Comparator(metaclass=ABCMeta):
@abstractmethod
def compare(self, a, b, meta=None):
raise NotImplementedError()
class StringComparator(Comparator):
@staticmethod
def compare_strings(s1, s2, ambiguous_char='X'):
smaller_size = min(len(s1), len(s2))
new_string = ''
for i in range(smaller_size):
if s1[i] == s2[i]:
new_string += s1[i]
else:
new_string += ambiguous_char
for j in range(abs(len(s1) - len(s2))):
new_string += ambiguous_char
ambiguity = new_string.count(ambiguous_char) / len(new_string) if len(new_string) > 0 else 0
return new_string, ambiguity
@staticmethod
def substring_finder(string1, string2):
anslist = []
len1, len2 = len(string1), len(string2)
for i in range(len1):
match = ""
for j in range(len2):
if (i + j < len1 and string1[i + j] == string2[j]):
match += string2[j]
else:
answer = match
if answer != '' and len(answer) > 1:
anslist.append(answer)
match = ""
if match != '':
anslist.append(match)
return anslist
def has_numbers(inputString):
return bool(re.search(r'\d', inputString))
class Sampler(metaclass=ABCMeta):
def __init__(self, iterable, n=1):
self.iterable = iterable
self.n = n
self.frac = 0 <= n <= 1
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError()
def sample(self):
return self.__call__()
class WeightedRandomSampler(Sampler):
def __init__(self, weights, n=1, random_state=None):
super(WeightedRandomSampler, self).__init__(weights, n)
self.random_state = random_state
self.totals = []
running_total = 0
for w in weights.values():
running_total += w
self.totals.append(running_total)
def next(self):
rnd = random.random() * self.totals[-1]
return bisect.bisect_right(self.totals, rnd)
def __call__(self):
sample = Counter()
n = int(self.totals[-1] * self.n) if self.frac else int(self.n)
keys = list(self.iterable.keys())
random.seed(self.random_state)
for _c in range(n):
sample[keys[self.next()]] += 1
return WeightedRandomSampler.counter_to_list(sample)
@staticmethod
def counter_to_list(counter):
series = list()
for k, v in counter.items():
for _ in range(v):
series.append(k)
return series
class RandomSampler(Sampler):
def __init__(self, iterable, n=1, random_state=None):
super(RandomSampler, self).__init__(iterable, n)
self.random_state = random_state
def __call__(self, *args, **kwargs):
random.seed(self.random_state)
n = int(len(self.iterable) * self.n) if self.frac else int(self.n)
return random.sample(self.iterable, n)
class Distinct(Sampler):
def __init__(self, iterable):
super(Distinct, self).__init__(iterable, 1)
def __call__(self, *args, **kwargs):
return list(set(self.iterable))
def list_contains_list(o, tree_types=list):
if isinstance(o, tree_types):
for v in o:
if isinstance(v, tree_types):
return True
elif not isinstance(o, tree_types):
return True
return False | true | true |
f72876d778a8e2b83e563d9d86d02501fb609ea5 | 920 | py | Python | src/project/wsgi.py | fahadalmutairi/edgetest | cc37a9cd53da74bd32f69f7a099679bccf0268a7 | [
"MIT"
] | null | null | null | src/project/wsgi.py | fahadalmutairi/edgetest | cc37a9cd53da74bd32f69f7a099679bccf0268a7 | [
"MIT"
] | null | null | null | src/project/wsgi.py | fahadalmutairi/edgetest | cc37a9cd53da74bd32f69f7a099679bccf0268a7 | [
"MIT"
] | null | null | null | """
WSGI config for project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
# Must be set before any Django machinery is imported.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings.production")
from django.core.wsgi import get_wsgi_application
# The WSGI callable served by the application server.
application = get_wsgi_application()
# Wrap werkzeug debugger if DEBUG is on
from django.conf import settings
if settings.DEBUG:
    try:
        import django.views.debug
        import six
        from werkzeug.debug import DebuggedApplication
        # Re-raise server errors so werkzeug's interactive debugger handles
        # them instead of Django's technical 500 page.
        def null_technical_500_response(request, exc_type, exc_value, tb):
            six.reraise(exc_type, exc_value, tb)
        django.views.debug.technical_500_response = null_technical_500_response
        application = DebuggedApplication(application, evalex=True)
    except ImportError:
        # werkzeug/six not installed -- deliberately fall back to plain Django.
        pass
| 30.666667 | 79 | 0.75 | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings.production")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
from django.conf import settings
if settings.DEBUG:
try:
import django.views.debug
import six
from werkzeug.debug import DebuggedApplication
def null_technical_500_response(request, exc_type, exc_value, tb):
six.reraise(exc_type, exc_value, tb)
django.views.debug.technical_500_response = null_technical_500_response
application = DebuggedApplication(application, evalex=True)
except ImportError:
pass
| true | true |
f72877481dde1a80618e5757bcee6e91a830d0a1 | 1,730 | py | Python | d3rlpy-master/tests/dynamics/torch/test_probabilistic_ensemble_dynamics_impl.py | SOPR-T/SOPR-T | 3242461fa8b3e917cde70be497beb1158a7b27e6 | [
"MIT"
] | 1 | 2021-07-09T22:39:28.000Z | 2021-07-09T22:39:28.000Z | tests/dynamics/torch/test_probabilistic_ensemble_dynamics_impl.py | astrojuanlu/d3rlpy | e27852664647b7774f56ec775437b0ca73a24f3f | [
"MIT"
] | null | null | null | tests/dynamics/torch/test_probabilistic_ensemble_dynamics_impl.py | astrojuanlu/d3rlpy | e27852664647b7774f56ec775437b0ca73a24f3f | [
"MIT"
] | null | null | null | import pytest
from d3rlpy.dynamics.torch.probabilistic_ensemble_dynamics_impl import (
ProbabilisticEnsembleDynamicsImpl,
)
from d3rlpy.models.encoders import DefaultEncoderFactory
from d3rlpy.models.optimizers import AdamFactory
from tests.algos.algo_test import DummyActionScaler, DummyScaler
from tests.dynamics.dynamics_test import torch_impl_tester
@pytest.mark.parametrize("observation_shape", [(100,)])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("learning_rate", [1e-3])
@pytest.mark.parametrize("optim_factory", [AdamFactory()])
@pytest.mark.parametrize("encoder_factory", [DefaultEncoderFactory()])
@pytest.mark.parametrize("n_ensembles", [5])
@pytest.mark.parametrize("variance_type", ["max"])
@pytest.mark.parametrize("discrete_action", [False, True])
@pytest.mark.parametrize("scaler", [None, DummyScaler()])
@pytest.mark.parametrize("action_scaler", [None, DummyActionScaler()])
def test_probabilistic_ensemble_dynamics_impl(
observation_shape,
action_size,
learning_rate,
optim_factory,
encoder_factory,
n_ensembles,
variance_type,
discrete_action,
scaler,
action_scaler,
):
impl = ProbabilisticEnsembleDynamicsImpl(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=learning_rate,
optim_factory=optim_factory,
encoder_factory=encoder_factory,
n_ensembles=n_ensembles,
variance_type=variance_type,
discrete_action=discrete_action,
use_gpu=None,
scaler=scaler,
action_scaler=action_scaler if not discrete_action else None,
)
impl.build()
torch_impl_tester(impl, discrete=discrete_action, n_ensembles=n_ensembles)
| 35.306122 | 78 | 0.760116 | import pytest
from d3rlpy.dynamics.torch.probabilistic_ensemble_dynamics_impl import (
ProbabilisticEnsembleDynamicsImpl,
)
from d3rlpy.models.encoders import DefaultEncoderFactory
from d3rlpy.models.optimizers import AdamFactory
from tests.algos.algo_test import DummyActionScaler, DummyScaler
from tests.dynamics.dynamics_test import torch_impl_tester
@pytest.mark.parametrize("observation_shape", [(100,)])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("learning_rate", [1e-3])
@pytest.mark.parametrize("optim_factory", [AdamFactory()])
@pytest.mark.parametrize("encoder_factory", [DefaultEncoderFactory()])
@pytest.mark.parametrize("n_ensembles", [5])
@pytest.mark.parametrize("variance_type", ["max"])
@pytest.mark.parametrize("discrete_action", [False, True])
@pytest.mark.parametrize("scaler", [None, DummyScaler()])
@pytest.mark.parametrize("action_scaler", [None, DummyActionScaler()])
def test_probabilistic_ensemble_dynamics_impl(
observation_shape,
action_size,
learning_rate,
optim_factory,
encoder_factory,
n_ensembles,
variance_type,
discrete_action,
scaler,
action_scaler,
):
impl = ProbabilisticEnsembleDynamicsImpl(
observation_shape=observation_shape,
action_size=action_size,
learning_rate=learning_rate,
optim_factory=optim_factory,
encoder_factory=encoder_factory,
n_ensembles=n_ensembles,
variance_type=variance_type,
discrete_action=discrete_action,
use_gpu=None,
scaler=scaler,
action_scaler=action_scaler if not discrete_action else None,
)
impl.build()
torch_impl_tester(impl, discrete=discrete_action, n_ensembles=n_ensembles)
| true | true |
f728777d43e9d57d72069d624f8ac3b9c45d8173 | 21,643 | py | Python | wizard.py | ubergeek42/lambda-lets-encrypt | 01e81577dabc984b18512233ddae37755d4e2acb | [
"MIT"
] | 219 | 2016-01-12T02:49:37.000Z | 2022-03-30T11:29:58.000Z | wizard.py | ubergeek42/lambda-lets-encrypt | 01e81577dabc984b18512233ddae37755d4e2acb | [
"MIT"
] | 27 | 2016-01-12T02:59:38.000Z | 2018-05-17T18:27:39.000Z | wizard.py | ubergeek42/lambda-lets-encrypt | 01e81577dabc984b18512233ddae37755d4e2acb | [
"MIT"
] | 34 | 2016-01-22T01:13:19.000Z | 2021-10-02T23:04:42.000Z | #!/usr/bin/env python
"""Lambda Lets-Encrypt Configuration/Setup Tool
This is a wizard that will help you configure the Lambda function to
automatically manage your SSL certifcates for CloudFront Distributions.
Usage:
setup.py
setup.py (-h | --help)
setup.py --version
Options:
-h --help Show this screen
--version Show the version
"""
from __future__ import print_function
import json
import textwrap
import time
import zipfile
from docopt import docopt
from string import Template
from installer import sns, cloudfront, iam, s3, awslambda, elb, route53
class colors:
    """ANSI terminal escape codes used to colorize the wizard's output."""
    OKBLUE = '\033[94m'     # informational text
    OKGREEN = '\033[92m'    # section headers / success messages
    WARNING = '\033[93m'    # warnings
    QUESTION = '\033[96m'   # interactive prompts
    FAIL = '\033[91m'       # errors
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def write_str(string):
    """Dedent *string*, wrap it to 80 columns, and print each resulting line."""
    for wrapped_line in textwrap.wrap(textwrap.dedent(string), 80):
        print(wrapped_line)
def print_header(string):
    """Print *string* wrapped to 80 columns as a green section header."""
    # Blank line, then switch the terminal to green before the wrapped text.
    print("\n" + colors.OKGREEN, end='')
    write_str(string)
    # Restore the default terminal color.
    print(colors.ENDC, end='')
def get_input(prompt, allow_empty=True):
    """Prompt the user and return the typed line.

    Re-prompts until a non-empty answer is given unless *allow_empty* is True.
    """
    from sys import version_info
    # Pick the right line reader once, depending on the interpreter (Py2/Py3).
    if version_info[0] > 2:
        read_line = input
    else:
        read_line = raw_input
    response = None
    while response is None or (not allow_empty and len(response) == 0):
        print(colors.QUESTION + "> " + prompt + colors.ENDC, end='')
        response = read_line()
    return response
def get_yn(prompt, default=True):
    """Ask a yes/no question; an empty answer returns *default*.

    Only "y"/"yes" (case-insensitive) count as an affirmative answer.
    """
    # Note: anything other than the literal True is normalised to a "no" default.
    if default is True:
        prompt += "[Y/n]? "
        default = True
    else:
        prompt += "[y/N]? "
        default = False
    answer = get_input(prompt, allow_empty=True)
    if len(answer) == 0:
        return default
    return answer.lower() in ("y", "yes")
def get_selection(prompt, options, prompt_after='Please select from the list above', allow_empty=False):
    """Show a menu of *options* and return the chosen option's 'return' value.

    Each option is a dict with 'selector' (what the user types), 'prompt'
    (the menu text) and 'return' (the value handed back).  Loops until a
    valid selector is entered; returns None for an empty answer when
    *allow_empty* is set.
    """
    if allow_empty:
        prompt_after += "(Empty for none)"
    prompt_after += ": "
    while True:
        print(prompt)
        for option in options:
            print('[{}] {}'.format(option['selector'], option['prompt']))
        print()
        answer = get_input(prompt_after, allow_empty=True)
        # Allow for empty things if desired.
        if allow_empty and len(answer) == 0:
            return None
        # Find and return their choice.
        for option in options:
            if answer == str(option['selector']):
                return option['return']
        print(colors.WARNING + 'Please enter a valid choice!' + colors.ENDC)
def choose_s3_bucket():
    """Ask the user to pick one of their existing S3 buckets; return its name."""
    options = [
        {'selector': index, 'prompt': bucket_name, 'return': bucket_name}
        for index, bucket_name in enumerate(s3.s3_list_buckets())
    ]
    return get_selection("Select the S3 Bucket to use:", options, prompt_after="Which S3 Bucket?", allow_empty=False)
def wizard_elb(global_config):
    """Interactively pick ELBs and the Route53 zones to certify for each.

    Fills global_config['elb_sites'] (one entry per load balancer:
    ELB_NAME, ELB_PORT, DOMAINS) and global_config['elb_domains'] (one
    entry per domain: DOMAIN, ROUTE53_ZONE_ID, VALIDATION_METHODS).
    Only DNS (dns-01) validation is offered for ELBs.
    """
    print_header("ELB Configuration")
    write_str("""\
    Now we'll detect your existing Elastic Load Balancers and allow you
    to configure them to use SSL. You must select the domain names
    you want on the certificate for each ELB.""")
    write_str("""\
    Note that only DNS validation(via Route53) is supported for ELBs""")
    print()
    global_config['elb_sites'] = []
    global_config['elb_domains'] = []
    # Build a selection menu from the account's load balancers
    elb_list = elb.list_elbs()
    elb_list_opts = []
    for i, elb_name in enumerate(elb_list):
        elb_list_opts.append({
            'selector': i,
            'prompt': elb_name,
            'return': elb_name
        })
    # Build a selection menu from the account's Route53 hosted zones
    route53_list = route53.list_zones()
    route53_list_opts = []
    for i, zone in enumerate(route53_list):
        route53_list_opts.append({
            'selector': i,
            'prompt': "{} - {}".format(zone['Name'], zone['Id']),
            'return': zone
        })
    # Outer loop: one iteration per ELB the user wants to configure
    while True:
        lb = get_selection("Choose an ELB to configure SSL for(Leave blank for none)", elb_list_opts, prompt_after="Which ELB?", allow_empty=True)
        if lb is None:
            break
        # NOTE(review): lb_port stays a string when the user types a value,
        # but is the int 443 when defaulted -- confirm consumers accept both.
        lb_port = get_input("What port number will this certificate be for(HTTPS is 443) [443]?", allow_empty=True)
        if len(lb_port) == 0:
            lb_port = 443
        domains = []
        # Inner loop: collect zones (domains) for this ELB's certificate
        while True:
            if len(domains) > 0:
                print("Already selected: {}".format(",".join(domains)))
            zone = get_selection("Choose a Route53 Zone that points to this load balancer: ", route53_list_opts, prompt_after="Which zone?", allow_empty=True)
            # stop when they don't enter anything
            if zone is None:
                break
            # Only allow adding each domain once
            if zone['Name'] in domains:
                continue
            domains.append(zone['Name'])
            global_config['elb_domains'].append({
                'DOMAIN': zone['Name'],
                'ROUTE53_ZONE_ID': zone['Id'],
                'VALIDATION_METHODS': ['dns-01']
            })
        site = {
            'ELB_NAME': lb,
            'ELB_PORT': lb_port,
            'DOMAINS': domains,
        }
        global_config['elb_sites'].append(site)
def wizard_cf(global_config):
    """Interactively pick CloudFront distributions to certify.

    Domains are taken from each distribution's Aliases/CNAMEs.  Fills
    global_config['cf_sites'] (CLOUDFRONT_ID, DOMAINS) and
    global_config['cf_domains'] (per-domain dicts with DOMAIN,
    VALIDATION_METHODS, and ROUTE53_ZONE_ID / CLOUDFRONT_ID depending on
    the validation methods the user enables).
    """
    print_header("CloudFront Configuration")
    global_config['cf_sites'] = []
    global_config['cf_domains'] = []
    # Get the list of all Cloudfront Distributions
    cf_dist_list = cloudfront.list_distributions()
    cf_dist_opts = []
    for i, d in enumerate(cf_dist_list):
        cf_dist_opts.append({
            'selector': i,
            'prompt': "{} - {} ({}) ".format(d['Id'], d['Comment'], ", ".join(d['Aliases'])),
            'return': d
        })
    write_str("""\
    Now we'll detect your existing CloudFront Distributions and allow you
    to configure them to use SSL. Domain names will be automatically
    detected from the 'Aliases/CNAMEs' configuration section of each
    Distribution.""")
    print()
    write_str("""\
    You will configure each Distribution fully before being presented with
    the list of Distributions again. You can configure as many Distributions
    as you like.""")
    # One iteration per distribution; blank selection ends the loop.
    while True:
        print()
        dist = get_selection("Select a CloudFront Distribution to configure with Lets-Encrypt(leave blank to finish)", cf_dist_opts, prompt_after="Which CloudFront Distribution?", allow_empty=True)
        if dist is None:
            break
        cnames = dist['Aliases']
        write_str("The following domain names exist for the selected CloudFront Distribution:")
        write_str("  " + ", ".join(cnames))
        write_str("Each domain in this list will be validated with Lets-Encrypt and added to the certificate assigned to this Distribution.")
        print()
        # Ask, per domain, which validation methods (dns-01 / http-01) to use.
        for dns_name in cnames:
            domain = {
                'DOMAIN': dns_name,
                'VALIDATION_METHODS': []
            }
            print("Choose validation methods for the domain '{}'".format(dns_name))
            route53_id = route53.get_zone_id(dns_name)
            if route53_id:
                # DNS validation is only offered when a matching zone exists.
                write_str(colors.OKGREEN + "Route53 zone detected!" + colors.ENDC)
                validate_via_dns = get_yn("Validate using DNS", default=False)
                if validate_via_dns:
                    domain['ROUTE53_ZONE_ID'] = route53_id
                    domain['VALIDATION_METHODS'].append('dns-01')
            else:
                write_str(colors.WARNING + "No Route53 zone detected, DNS validation not possible." + colors.ENDC)
            validate_via_http = get_yn("Validate using HTTP", default=True)
            if validate_via_http:
                domain['CLOUDFRONT_ID'] = dist['Id']
                domain['VALIDATION_METHODS'].append('http-01')
            global_config['cf_domains'].append(domain)
        site = {
            'CLOUDFRONT_ID': dist['Id'],
            'DOMAINS': cnames
        }
        global_config['cf_sites'].append(site)
def wizard_sns(global_config):
    """Ask for an optional notification email address.

    Sets global_config['sns_email'] (possibly the empty string) and
    global_config['use_sns'] (False when the address was left blank).
    """
    print_header("Notifications")
    write_str("""\
    The lambda function can send notifications when a certificate is issued,
    errors occur, or other things that may need your attention.
    Notifications are optional.""")
    sns_email = get_input("Enter the email address for notifications(blank to disable): ", allow_empty=True)
    global_config['sns_email'] = sns_email
    global_config['use_sns'] = len(sns_email) > 0
def wizard_s3_cfg_bucket(global_config):
    """Choose, or schedule creation of, the S3 bucket that stores config.

    Sets global_config['create_s3_cfg_bucket'] and
    global_config['s3_cfg_bucket'].
    """
    print_header("S3 Configuration Bucket")
    write_str('An S3 Bucket is required to store configuration. If you already have a bucket you want to use for this choose no and select it from the list. Otherwise let the wizard create one for you.')
    create_s3_cfg_bucket = get_yn("Create a bucket for configuration", True)
    # New buckets get a timestamp suffix to make the name unique.
    s3_cfg_bucket = (
        "lambda-letsencrypt-config-{}".format(global_config['ts'])
        if create_s3_cfg_bucket
        else choose_s3_bucket()
    )
    global_config['create_s3_cfg_bucket'] = create_s3_cfg_bucket
    global_config['s3_cfg_bucket'] = s3_cfg_bucket
def wizard_iam(global_config):
    """Select, or schedule creation of, the IAM role for the lambda.

    Sets global_config['create_iam_role'] and
    global_config['iam_role_name'].
    """
    print_header("IAM Configuration")
    write_str("An IAM role must be created for this lambda function giving it access to CloudFront, Route53, S3, SNS(notifications), IAM(certificates), and CloudWatch(logs/alarms).")
    print()
    write_str("If you do not let the wizard create this role you will be asked to select an existing role to use.")
    create_iam_role = get_yn("Do you want to automatically create this role", True)
    if create_iam_role:
        iam_role_name = "lambda-letsencrypt"
    else:
        # Let the user pick one of the account's existing roles.
        menu = [
            {'selector': idx, 'prompt': role, 'return': role}
            for idx, role in enumerate(iam.list_roles())
        ]
        iam_role_name = get_selection("Select the IAM Role:", menu, prompt_after="Which IAM Role?", allow_empty=False)
    global_config['create_iam_role'] = create_iam_role
    global_config['iam_role_name'] = iam_role_name
def wizard_challenges(global_config):
    """Configure how Lets-Encrypt challenges will be validated.

    Sets global_config['use_http_challenges'],
    global_config['create_s3_challenge_bucket'] and
    global_config['s3_challenge_bucket'] (None when HTTP validation is
    disabled, leaving DNS as the only validation method).
    """
    create_s3_challenge_bucket = False
    s3_challenge_bucket = None
    print_header("Lets-Encrypt Challenge Validation Settings")
    write_str("""This tool will handle validation of your domains automatically. There are two possible validation methods: HTTP and DNS.""")
    print()
    write_str("HTTP validation is only available for CloudFront sites. It requires an S3 bucket to store the challenge responses in. This bucket needs to be publicly accessible. Your CloudFront Distribution(s) will be reconfigured to use this bucket as an origin for challenge responses.")
    write_str("If you do not configure a bucket for this you will only be able to use DNS validation.")
    print()
    write_str("DNS validation requires your domain to be managed with Route53. This validation method is always available and requires no additional configuration.")
    write_str(colors.WARNING + "Note: DNS validation is currently only supported by the staging server." + colors.ENDC)
    print()
    write_str("Each domain you want to manage can be configured to validate using either of these methods.")
    print()
    use_http_challenges = get_yn("Do you want to configure HTTP validation", True)
    # If HTTP validation is declined no bucket is needed; the defaults above
    # already describe that state (the dead `else: pass` branch was removed).
    if use_http_challenges:
        create_s3_challenge_bucket = get_yn("Do you want to create a bucket for these challenges(Choose No to select an existing bucket)", True)
        if create_s3_challenge_bucket:
            s3_challenge_bucket = "lambda-letsencrypt-challenges-{}".format(global_config['ts'])
        else:
            s3_challenge_bucket = choose_s3_bucket()
    global_config['use_http_challenges'] = use_http_challenges
    global_config['create_s3_challenge_bucket'] = create_s3_challenge_bucket
    global_config['s3_challenge_bucket'] = s3_challenge_bucket
def wizard_summary(global_config):
    """Print a read-only summary of every choice collected so far."""
    gc = global_config
    print_header("**Summary**")
    print("Notification Email: {}".format(gc['sns_email'] or "(notifications disabled)"))
    print("S3 Config Bucket: {}".format(gc['s3_cfg_bucket']), end="")
    if (gc['create_s3_cfg_bucket']):
        print(" (to be created)")
    else:
        print(" (existing)")
    if gc['create_iam_role']:
        print("IAM Role Name: {} (to be created)".format(gc['iam_role_name']))
    else:
        print("IAM Role Name: {} (existing)".format(gc['iam_role_name']))
    print("Support HTTP Challenges: {}".format(gc['use_http_challenges']))
    if gc['use_http_challenges']:
        print("S3 HTTP Challenge Bucket: {}".format(gc['s3_challenge_bucket']), end="")
        if (gc['create_s3_challenge_bucket']):
            print(" (to be created)")
        else:
            print(" (existing)")
    # Domains come from both the CloudFront and ELB wizard sections.
    print("Domains To Manage With Lets-Encrypt")
    for d in gc['cf_domains']:
        print("  {} - [{}]".format(d['DOMAIN'], ",".join(d['VALIDATION_METHODS'])))
    for d in gc['elb_domains']:
        print("  {} - [{}]".format(d['DOMAIN'], ",".join(d['VALIDATION_METHODS'])))
    print("CloudFront Distributions To Manage:")
    for cf in gc['cf_sites']:
        print("  {} - [{}]".format(cf['CLOUDFRONT_ID'], ",".join(cf['DOMAINS'])))
    print("Elastic Load Balancers to Manage:")
    for lb in gc['elb_sites']:
        print("  {}:{} - [{}]".format(lb['ELB_NAME'], lb['ELB_PORT'], ",".join(lb['DOMAINS'])))
def wizard_save_config(global_config):
    """Apply everything chosen in the wizard.

    Creates the requested SNS topic, S3 buckets and IAM role, renders
    config.py.dist into config-wizard.py, packages the lambda sources into
    a zip, uploads the lambda function, and finally prints the manual steps
    needed to schedule it to run daily.
    """
    print_header("Making Requested Changes")
    templatevars = {}
    with open('config.py.dist', 'r') as template:
        configfile = Template(template.read())
    templatevars['SNS_ARN'] = None
    templatevars['NOTIFY_EMAIL'] = None
    # Configure SNS if appropriate
    sns_arn = None
    if len(global_config['sns_email']) > 0:
        # Create SNS Topic if necessary
        print("Creating SNS Topic for Notifications  ", end='')
        sns_arn = sns.get_or_create_topic(global_config['sns_email'])
        if sns_arn is False or sns_arn is None:
            print(colors.FAIL + u'\u2717' + colors.ENDC)
        else:
            print(colors.OKGREEN + u'\u2713' + colors.ENDC)
        templatevars['SNS_ARN'] = sns_arn
        templatevars['NOTIFY_EMAIL'] = global_config['sns_email']
    # create config bucket if necessary
    if global_config['create_s3_cfg_bucket']:
        print("Creating S3 Configuration Bucket  ", end='')
        s3.create_bucket(global_config['s3_cfg_bucket'])
        print(colors.OKGREEN + u'\u2713' + colors.ENDC)
    # create challenge bucket if necessary(needs to be configured as static website)
    if global_config['create_s3_challenge_bucket']:
        print("Creating S3 Challenge Bucket  ", end='')
        s3.create_web_bucket(global_config['s3_challenge_bucket'])
        print(colors.OKGREEN + u'\u2713' + colors.ENDC)
    # create IAM role if required
    if global_config['create_iam_role']:
        # BUGFIX: this used to overwrite the configured role name with the
        # hard-coded debug value 'lambda-letsencrypt-test-role', silently
        # ignoring the name chosen in wizard_iam().
        policy_document = iam.generate_policy_document(
            s3buckets=[
                global_config['s3_cfg_bucket'],
                global_config['s3_challenge_bucket']
            ],
            snstopicarn=sns_arn
        )
        # Return value unused here; the ARN is re-fetched below before upload.
        iam.configure(global_config['iam_role_name'], policy_document)
    templatevars['S3_CONFIG_BUCKET'] = global_config['s3_cfg_bucket']
    templatevars['S3_CHALLENGE_BUCKET'] = global_config['s3_challenge_bucket']
    domains = global_config['cf_domains'] + global_config['elb_domains']
    sites = global_config['cf_sites'] + global_config['elb_sites']
    templatevars['DOMAINS'] = json.dumps(domains, indent=4)
    templatevars['SITES'] = json.dumps(sites, indent=4)
    # write out the config file
    config = configfile.substitute(templatevars)
    with open("config-wizard.py", 'w') as configfinal:
        print("Writing Configuration File  ", end='')
        configfinal.write(config)
        print(colors.OKGREEN + u'\u2713' + colors.ENDC)
    print("Creating Zip File To Upload To Lambda")
    archive_success = True
    archive = zipfile.ZipFile('lambda-letsencrypt-dist.zip', mode='w')
    try:
        for f in ['lambda_function.py', 'simple_acme.py']:
            print("  Adding '{}'".format(f))
            archive.write(f)
        print("  Adding 'config.py'")
        archive.write('config-wizard.py', 'config.py')
    except Exception as e:
        print(colors.FAIL + 'Zip File Creation Failed' + colors.ENDC)
        print(e)
        archive_success = False
    else:
        # BUGFIX: the success message used to live in `finally`, so it was
        # printed even when zip creation had just failed.
        print('Zip File Created Successfully')
    finally:
        archive.close()
    # can't continue if this failed
    if not archive_success:
        return
    print("Configuring Lambda Function:")
    iam_arn = iam.get_arn(global_config['iam_role_name'])
    print("  IAM ARN: {}".format(iam_arn))
    print("  Uploading Function  ", end='')
    if awslambda.create_function("lambda-letsencrypt", iam_arn, 'lambda-letsencrypt-dist.zip'):
        print(colors.OKGREEN + u'\u2713' + colors.ENDC)
    else:
        print(colors.FAIL + u'\u2717' + colors.ENDC)
        return
    # The remaining steps cannot be automated: tell the user how to schedule
    # the function and how to test it.
    print_header("Schedule Lambda Function")
    write_str("I've done all I can for you now, there's one last step you have to take manually in order to schedule your lambda function to run once a day.")
    write_str("Log into your aws console and go to this page:")
    lambda_event_url = "https://console.aws.amazon.com/lambda/home#/functions/lambda-letsencrypt?tab=eventSources"
    print(colors.OKBLUE + lambda_event_url + colors.ENDC)
    print()
    write_str('Click on "Add event source". From the dropdown, choose "Scheduled Event". Enter the following:')
    write_str("Name: 'daily - rate(1 day)'")
    write_str("Description: 'Run every day'")
    write_str("Schedule Expression: 'rate(1 day)'")
    print()
    write_str("Choose to 'Enable Now', then click 'Submit'")
    print_header("Testing")
    write_str("You may want to test this before you set it to be recurring. Click on the 'Test' button in the AWS Console for the lambda-letsencrypt function. The data you provide to this function does not matter. Make sure to review the logs after it finishes and check for anything out of the ordinary.")
    print()
    write_str("It will take at least 2 runs before your certificates are issued, maybe 3 depending on how fast cloudfront responds. This is because it needs one try to configure cloudfront, one to submit the challenge and have it verified, and one final run to issue the certificate and configure the cloudfront distribution")
def wizard(global_config):
    """Run the full interactive setup, then apply it.

    Populates *global_config* in place via the individual wizard_* steps,
    loops on a summary screen that lets the user revisit any section, and
    finally performs the requested changes with wizard_save_config().
    """
    # BUGFIX: removed the leftover debug override `ts = 1000`, which made
    # every generated bucket name identical instead of timestamp-unique.
    global_config['ts'] = int(time.time())
    print_header("Lambda Lets-Encrypt Wizard")
    write_str("""\
    This wizard will guide you through the process of setting up your existing
    CloudFront Distributions to use SSL certificates provided by Lets-Encrypt
    and automatically issued/maintained by an AWS Lambda function.
    These certificates are free of charge, and valid for 90 days. This wizard
    will also set up a Lambda function that is responsible for issuing and
    renewing these certificates automatically as they near their expiration
    date.
    The cost of the AWS services used to make this work are typically less
    than a penny per month. For full pricing details please refer to the
    docs.
    """)
    print()
    print(colors.WARNING + "WARNING: ")
    write_str("""\
    Manual configuration is required at this time to configure the Lambda
    function to run on a daily basis to keep your certificate updated. If
    you do not follow the steps provided at the end of this wizard your
    Lambda function will *NOT* run.
    """)
    print(colors.ENDC)
    # Initial pass through every configuration section.
    wizard_sns(global_config)
    wizard_iam(global_config)
    wizard_s3_cfg_bucket(global_config)
    wizard_challenges(global_config)
    wizard_cf(global_config)
    wizard_elb(global_config)
    # Menu used to re-run a single section from the summary loop below.
    cfg_menu = []
    cfg_menu.append({'selector': 1, 'prompt': 'SNS', 'return': wizard_sns})
    cfg_menu.append({'selector': 2, 'prompt': 'IAM', 'return': wizard_iam})
    cfg_menu.append({'selector': 3, 'prompt': 'S3 Config', 'return': wizard_s3_cfg_bucket})
    cfg_menu.append({'selector': 4, 'prompt': 'Challenges', 'return': wizard_challenges})
    cfg_menu.append({'selector': 5, 'prompt': 'CloudFront', 'return': wizard_cf})
    # BUGFIX: selector 6 previously dispatched wizard_cf again, so the ELB
    # section could never be revisited from this menu.
    cfg_menu.append({'selector': 6, 'prompt': 'Elastic Load Balancers', 'return': wizard_elb})
    cfg_menu.append({'selector': 9, 'prompt': 'Done', 'return': None})
    finished = False
    while not finished:
        wizard_summary(global_config)
        finished = get_yn("Are these settings correct", True)
        if not finished:
            selection = get_selection("Which section do you want to change", cfg_menu, prompt_after="Which section to modify?", allow_empty=False)
            if selection:
                selection(global_config)
    wizard_save_config(global_config)
if __name__ == "__main__":
    # docopt parses the module docstring for the usage text.
    # NOTE(review): no module docstring is visible in this chunk, so __doc__
    # may be None here -- confirm a usage docstring exists at the top of the
    # full file.
    args = docopt(__doc__, version='Lambda Lets-Encrypt 1.0')
    global_config = {}
    wizard(global_config)
| 39.566728 | 326 | 0.64321 |
from __future__ import print_function
import json
import textwrap
import time
import zipfile
from docopt import docopt
from string import Template
from installer import sns, cloudfront, iam, s3, awslambda, elb, route53
class colors:
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
QUESTION = '\033[96m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def write_str(string):
lines = textwrap.wrap(textwrap.dedent(string), 80)
for line in lines:
print(line)
def print_header(string):
print()
print(colors.OKGREEN, end='')
write_str(string)
print(colors.ENDC, end='')
def get_input(prompt, allow_empty=True):
from sys import version_info
py3 = version_info[0] > 2
response = None
while response is None or (not allow_empty and len(response) == 0):
print(colors.QUESTION + "> " + prompt + colors.ENDC, end='')
if py3:
response = input()
else:
response = raw_input()
return response
def get_yn(prompt, default=True):
if default is True:
prompt += "[Y/n]? "
default = True
else:
prompt += "[y/N]? "
default = False
ret = get_input(prompt, allow_empty=True)
if len(ret) == 0:
return default
if ret.lower() == "y" or ret.lower() == "yes":
return True
return False
def get_selection(prompt, options, prompt_after='Please select from the list above', allow_empty=False):
if allow_empty:
prompt_after += "(Empty for none)"
prompt_after += ": "
while True:
print(prompt)
for item in options:
print('[{}] {}'.format(item['selector'], item['prompt']))
print()
choice = get_input(prompt_after, allow_empty=True)
if len(choice) == 0 and allow_empty:
return None
for x in options:
if choice == str(x['selector']):
return x['return']
print(colors.WARNING + 'Please enter a valid choice!' + colors.ENDC)
def choose_s3_bucket():
bucket_list = s3.s3_list_buckets()
options = []
for i, bucket in enumerate(bucket_list):
options.append({
'selector': i,
'prompt': bucket,
'return': bucket
})
return get_selection("Select the S3 Bucket to use:", options, prompt_after="Which S3 Bucket?", allow_empty=False)
def wizard_elb(global_config):
print_header("ELB Configuration")
write_str("""\
Now we'll detect your existing Elastic Load Balancers and allow you
to configure them to use SSL. You must select the domain names
you want on the certificate for each ELB.""")
write_str("""\
Note that only DNS validation(via Route53) is supported for ELBs""")
print()
global_config['elb_sites'] = []
global_config['elb_domains'] = []
# Get the list of all Cloudfront Distributions
elb_list = elb.list_elbs()
elb_list_opts = []
for i, elb_name in enumerate(elb_list):
elb_list_opts.append({
'selector': i,
'prompt': elb_name,
'return': elb_name
})
route53_list = route53.list_zones()
route53_list_opts = []
for i, zone in enumerate(route53_list):
route53_list_opts.append({
'selector': i,
'prompt': "{} - {}".format(zone['Name'], zone['Id']),
'return': zone
})
while True:
lb = get_selection("Choose an ELB to configure SSL for(Leave blank for none)", elb_list_opts, prompt_after="Which ELB?", allow_empty=True)
if lb is None:
break
lb_port = get_input("What port number will this certificate be for(HTTPS is 443) [443]?", allow_empty=True)
if len(lb_port) == 0:
lb_port = 443
domains = []
while True:
if len(domains) > 0:
print("Already selected: {}".format(",".join(domains)))
zone = get_selection("Choose a Route53 Zone that points to this load balancer: ", route53_list_opts, prompt_after="Which zone?", allow_empty=True)
# stop when they don't enter anything
if zone is None:
break
if zone['Name'] in domains:
continue
domains.append(zone['Name'])
global_config['elb_domains'].append({
'DOMAIN': zone['Name'],
'ROUTE53_ZONE_ID': zone['Id'],
'VALIDATION_METHODS': ['dns-01']
})
site = {
'ELB_NAME': lb,
'ELB_PORT': lb_port,
'DOMAINS': domains,
}
global_config['elb_sites'].append(site)
def wizard_cf(global_config):
print_header("CloudFront Configuration")
global_config['cf_sites'] = []
global_config['cf_domains'] = []
cf_dist_list = cloudfront.list_distributions()
cf_dist_opts = []
for i, d in enumerate(cf_dist_list):
cf_dist_opts.append({
'selector': i,
'prompt': "{} - {} ({}) ".format(d['Id'], d['Comment'], ", ".join(d['Aliases'])),
'return': d
})
write_str("""\
Now we'll detect your existing CloudFront Distributions and allow you
to configure them to use SSL. Domain names will be automatically
detected from the 'Aliases/CNAMEs' configuration section of each
Distribution.""")
print()
write_str("""\
You will configure each Distribution fully before being presented with
the list of Distributions again. You can configure as many Distributions
as you like.""")
while True:
print()
dist = get_selection("Select a CloudFront Distribution to configure with Lets-Encrypt(leave blank to finish)", cf_dist_opts, prompt_after="Which CloudFront Distribution?", allow_empty=True)
if dist is None:
break
cnames = dist['Aliases']
write_str("The following domain names exist for the selected CloudFront Distribution:")
write_str(" " + ", ".join(cnames))
write_str("Each domain in this list will be validated with Lets-Encrypt and added to the certificate assigned to this Distribution.")
print()
for dns_name in cnames:
domain = {
'DOMAIN': dns_name,
'VALIDATION_METHODS': []
}
print("Choose validation methods for the domain '{}'".format(dns_name))
route53_id = route53.get_zone_id(dns_name)
if route53_id:
write_str(colors.OKGREEN + "Route53 zone detected!" + colors.ENDC)
validate_via_dns = get_yn("Validate using DNS", default=False)
if validate_via_dns:
domain['ROUTE53_ZONE_ID'] = route53_id
domain['VALIDATION_METHODS'].append('dns-01')
else:
write_str(colors.WARNING + "No Route53 zone detected, DNS validation not possible." + colors.ENDC)
validate_via_http = get_yn("Validate using HTTP", default=True)
if validate_via_http:
domain['CLOUDFRONT_ID'] = dist['Id']
domain['VALIDATION_METHODS'].append('http-01')
global_config['cf_domains'].append(domain)
site = {
'CLOUDFRONT_ID': dist['Id'],
'DOMAINS': cnames
}
global_config['cf_sites'].append(site)
def wizard_sns(global_config):
sns_email = None
print_header("Notifications")
write_str("""\
The lambda function can send notifications when a certificate is issued,
errors occur, or other things that may need your attention.
Notifications are optional.""")
use_sns = True
sns_email = get_input("Enter the email address for notifications(blank to disable): ", allow_empty=True)
if len(sns_email) == 0:
use_sns = False
global_config['use_sns'] = use_sns
global_config['sns_email'] = sns_email
def wizard_s3_cfg_bucket(global_config):
print_header("S3 Configuration Bucket")
write_str('An S3 Bucket is required to store configuration. If you already have a bucket you want to use for this choose no and select it from the list. Otherwise let the wizard create one for you.')
create_s3_cfg_bucket = get_yn("Create a bucket for configuration", True)
if create_s3_cfg_bucket:
s3_cfg_bucket = "lambda-letsencrypt-config-{}".format(global_config['ts'])
else:
s3_cfg_bucket = choose_s3_bucket()
global_config['create_s3_cfg_bucket'] = create_s3_cfg_bucket
global_config['s3_cfg_bucket'] = s3_cfg_bucket
def wizard_iam(global_config):
print_header("IAM Configuration")
write_str("An IAM role must be created for this lambda function giving it access to CloudFront, Route53, S3, SNS(notifications), IAM(certificates), and CloudWatch(logs/alarms).")
print()
write_str("If you do not let the wizard create this role you will be asked to select an existing role to use.")
create_iam_role = get_yn("Do you want to automatically create this role", True)
if not create_iam_role:
role_list = iam.list_roles()
options = []
for i, role in enumerate(role_list):
options.append({
'selector': i,
'prompt': role,
'return': role
})
iam_role_name = get_selection("Select the IAM Role:", options, prompt_after="Which IAM Role?", allow_empty=False)
else:
iam_role_name = "lambda-letsencrypt"
global_config['create_iam_role'] = create_iam_role
global_config['iam_role_name'] = iam_role_name
def wizard_challenges(global_config):
create_s3_challenge_bucket = False
s3_challenge_bucket = None
print_header("Lets-Encrypt Challenge Validation Settings")
write_str("""This tool will handle validation of your domains automatically. There are two possible validation methods: HTTP and DNS.""")
print()
write_str("HTTP validation is only available for CloudFront sites. It requires an S3 bucket to store the challenge responses in. This bucket needs to be publicly accessible. Your CloudFront Distribution(s) will be reconfigured to use this bucket as an origin for challenge responses.")
write_str("If you do not configure a bucket for this you will only be able to use DNS validation.")
print()
write_str("DNS validation requires your domain to be managed with Route53. This validation method is always available and requires no additional configuration.")
write_str(colors.WARNING + "Note: DNS validation is currently only supported by the staging server." + colors.ENDC)
print()
write_str("Each domain you want to manage can be configured to validate using either of these methods.")
print()
use_http_challenges = get_yn("Do you want to configure HTTP validation", True)
if use_http_challenges:
create_s3_challenge_bucket = get_yn("Do you want to create a bucket for these challenges(Choose No to select an existing bucket)", True)
if create_s3_challenge_bucket:
s3_challenge_bucket = "lambda-letsencrypt-challenges-{}".format(global_config['ts'])
else:
s3_challenge_bucket = choose_s3_bucket()
else:
# only dns challenge support is available
pass
global_config['use_http_challenges'] = use_http_challenges
global_config['create_s3_challenge_bucket'] = create_s3_challenge_bucket
global_config['s3_challenge_bucket'] = s3_challenge_bucket
def wizard_summary(global_config):
gc = global_config
print_header("**Summary**")
print("Notification Email: {}".format(gc['sns_email'] or "(notifications disabled)"))
print("S3 Config Bucket: {}".format(gc['s3_cfg_bucket']), end="")
if (gc['create_s3_cfg_bucket']):
print(" (to be created)")
else:
print(" (existing)")
if gc['create_iam_role']:
print("IAM Role Name: {} (to be created)".format(gc['iam_role_name']))
else:
print("IAM Role Name: {} (existing)".format(gc['iam_role_name']))
print("Support HTTP Challenges: {}".format(gc['use_http_challenges']))
if gc['use_http_challenges']:
print("S3 HTTP Challenge Bucket: {}".format(gc['s3_challenge_bucket']), end="")
if (gc['create_s3_challenge_bucket']):
print(" (to be created)")
else:
print(" (existing)")
print("Domains To Manage With Lets-Encrypt")
for d in gc['cf_domains']:
print(" {} - [{}]".format(d['DOMAIN'], ",".join(d['VALIDATION_METHODS'])))
for d in gc['elb_domains']:
print(" {} - [{}]".format(d['DOMAIN'], ",".join(d['VALIDATION_METHODS'])))
print("CloudFront Distributions To Manage:")
for cf in gc['cf_sites']:
print(" {} - [{}]".format(cf['CLOUDFRONT_ID'], ",".join(cf['DOMAINS'])))
print("Elastic Load Balancers to Manage:")
for lb in gc['elb_sites']:
print(" {}:{} - [{}]".format(lb['ELB_NAME'], lb['ELB_PORT'], ",".join(lb['DOMAINS'])))
def wizard_save_config(global_config):
print_header("Making Requested Changes")
templatevars = {}
with open('config.py.dist', 'r') as template:
configfile = Template(template.read())
templatevars['SNS_ARN'] = None
templatevars['NOTIFY_EMAIL'] = None
# Configure SNS if appropriate
sns_arn = None
if len(global_config['sns_email']) > 0:
# Create SNS Topic if necessary
print("Creating SNS Topic for Notifications ", end='')
sns_arn = sns.get_or_create_topic(global_config['sns_email'])
if sns_arn is False or sns_arn is None:
print(colors.FAIL + u'\u2717' + colors.ENDC)
else:
print(colors.OKGREEN + u'\u2713' + colors.ENDC)
templatevars['SNS_ARN'] = sns_arn
templatevars['NOTIFY_EMAIL'] = global_config['sns_email']
# create config bucket if necessary
if global_config['create_s3_cfg_bucket']:
print("Creating S3 Configuration Bucket ", end='')
s3.create_bucket(global_config['s3_cfg_bucket'])
print(colors.OKGREEN + u'\u2713' + colors.ENDC)
# create challenge bucket if necessary(needs to be configured as static website)
if global_config['create_s3_challenge_bucket']:
print("Creating S3 Challenge Bucket ", end='')
s3.create_web_bucket(global_config['s3_challenge_bucket'])
print(colors.OKGREEN + u'\u2713' + colors.ENDC)
# create IAM role if required
if global_config['create_iam_role']:
global_config['iam_role_name'] = 'lambda-letsencrypt-test-role'
policy_document = iam.generate_policy_document(
s3buckets=[
global_config['s3_cfg_bucket'],
global_config['s3_challenge_bucket']
],
snstopicarn=sns_arn
)
iam_arn = iam.configure(global_config['iam_role_name'], policy_document)
templatevars['S3_CONFIG_BUCKET'] = global_config['s3_cfg_bucket']
templatevars['S3_CHALLENGE_BUCKET'] = global_config['s3_challenge_bucket']
domains = global_config['cf_domains'] + global_config['elb_domains']
sites = global_config['cf_sites'] + global_config['elb_sites']
templatevars['DOMAINS'] = json.dumps(domains, indent=4)
templatevars['SITES'] = json.dumps(sites, indent=4)
# write out the config file
config = configfile.substitute(templatevars)
with open("config-wizard.py", 'w') as configfinal:
print("Writing Configuration File ", end='')
configfinal.write(config)
print(colors.OKGREEN + u'\u2713' + colors.ENDC)
print("Creating Zip File To Upload To Lambda")
archive_success = True
archive = zipfile.ZipFile('lambda-letsencrypt-dist.zip', mode='w')
try:
for f in ['lambda_function.py', 'simple_acme.py']:
print(" Adding '{}'".format(f))
archive.write(f)
print(" Adding 'config.py'")
archive.write('config-wizard.py', 'config.py')
except Exception as e:
print(colors.FAIL + 'Zip File Creation Failed' + colors.ENDC)
print(e)
archive_success = False
finally:
print('Zip File Created Successfully')
archive.close()
# can't continue if this failed
if not archive_success:
return
print("Configuring Lambda Function:")
iam_arn = iam.get_arn(global_config['iam_role_name'])
print(" IAM ARN: {}".format(iam_arn))
print(" Uploading Function ", end='')
if awslambda.create_function("lambda-letsencrypt", iam_arn, 'lambda-letsencrypt-dist.zip'):
print(colors.OKGREEN + u'\u2713' + colors.ENDC)
else:
print(colors.FAIL + u'\u2717' + colors.ENDC)
return
print_header("Schedule Lambda Function")
write_str("I've done all I can for you now, there's one last step you have to take manually in order to schedule your lambda function to run once a day.")
write_str("Log into your aws console and go to this page:")
lambda_event_url = "https://console.aws.amazon.com/lambda/home#/functions/lambda-letsencrypt?tab=eventSources"
print(colors.OKBLUE + lambda_event_url + colors.ENDC)
print()
write_str('Click on "Add event source". From the dropdown, choose "Scheduled Event". Enter the following:')
write_str("Name: 'daily - rate(1 day)'")
write_str("Description: 'Run every day'")
write_str("Schedule Expression: 'rate(1 day)'")
print()
write_str("Choose to 'Enable Now', then click 'Submit'")
print_header("Testing")
write_str("You may want to test this before you set it to be recurring. Click on the 'Test' button in the AWS Console for the lambda-letsencrypt function. The data you provide to this function does not matter. Make sure to review the logs after it finishes and check for anything out of the ordinary.")
print()
write_str("It will take at least 2 runs before your certificates are issued, maybe 3 depending on how fast cloudfront responds. This is because it needs one try to configure cloudfront, one to submit the challenge and have it verified, and one final run to issue the certificate and configure the cloudfront distribution")
def wizard(global_config):
    """Run the interactive setup wizard for Lambda Lets-Encrypt.

    Walks the user through configuring SNS, IAM, the S3 config bucket,
    challenge handling, CloudFront and Elastic Load Balancers, then loops
    over a summary menu until the settings are confirmed and saved.

    :param global_config: dict populated in place with all wizard settings.
    """
    # Timestamp used to tag resources created by this run.
    # BUG FIX: a leftover debug override (`ts = 1000`) used to clobber the
    # real timestamp immediately after it was computed; removed.
    ts = int(time.time())
    global_config['ts'] = ts
    print_header("Lambda Lets-Encrypt Wizard")
    write_str("""\
This wizard will guide you through the process of setting up your existing
CloudFront Distributions to use SSL certificates provided by Lets-Encrypt
and automatically issued/maintained by an AWS Lambda function.
These certificates are free of charge, and valid for 90 days. This wizard
will also set up a Lambda function that is responsible for issuing and
renewing these certificates automatically as they near their expiration
date.
The cost of the AWS services used to make this work are typically less
than a penny per month. For full pricing details please refer to the
docs.
""")
    print()
    print(colors.WARNING + "WARNING: ")
    write_str("""\
Manual configuration is required at this time to configure the Lambda
function to run on a daily basis to keep your certificate updated. If
you do not follow the steps provided at the end of this wizard your
Lambda function will *NOT* run.
""")
    print(colors.ENDC)

    # Run every configuration section once up front.
    wizard_sns(global_config)
    wizard_iam(global_config)
    wizard_s3_cfg_bucket(global_config)
    wizard_challenges(global_config)
    wizard_cf(global_config)
    wizard_elb(global_config)

    # Menu used to re-run an individual section from the summary screen.
    cfg_menu = []
    cfg_menu.append({'selector': 1, 'prompt': 'SNS', 'return': wizard_sns})
    cfg_menu.append({'selector': 2, 'prompt': 'IAM', 'return': wizard_iam})
    cfg_menu.append({'selector': 3, 'prompt': 'S3 Config', 'return': wizard_s3_cfg_bucket})
    cfg_menu.append({'selector': 4, 'prompt': 'Challenges', 'return': wizard_challenges})
    cfg_menu.append({'selector': 5, 'prompt': 'CloudFront', 'return': wizard_cf})
    # BUG FIX: selector 6 previously dispatched wizard_cf, so choosing
    # "Elastic Load Balancers" re-ran the CloudFront section instead.
    cfg_menu.append({'selector': 6, 'prompt': 'Elastic Load Balancers', 'return': wizard_elb})
    cfg_menu.append({'selector': 9, 'prompt': 'Done', 'return': None})

    # Keep showing the summary until the user accepts the configuration.
    finished = False
    while not finished:
        wizard_summary(global_config)
        finished = get_yn("Are these settings correct", True)
        if not finished:
            selection = get_selection("Which section do you want to change", cfg_menu, prompt_after="Which section to modify?", allow_empty=False)
            if selection:
                selection(global_config)
    wizard_save_config(global_config)
if __name__ == "__main__":
    # Entry point: docopt validates the CLI arguments (and exits on bad
    # usage), then the wizard fills an initially empty configuration.
    cli_args = docopt(__doc__, version='Lambda Lets-Encrypt 1.0')
    config = {}
    wizard(config)
| true | true |
f728784e949d8d6569f386295bf0297dec24e84d | 3,568 | py | Python | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/multi_variables_v1.py | globotree/tensorflow | b944fb947898de8cb4279a5a8a066955ba685412 | [
"Apache-2.0"
] | 12 | 2020-12-28T18:42:10.000Z | 2022-03-24T17:34:21.000Z | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/multi_variables_v1.py | globotree/tensorflow | b944fb947898de8cb4279a5a8a066955ba685412 | [
"Apache-2.0"
] | 2 | 2021-08-25T15:58:11.000Z | 2022-02-10T01:47:24.000Z | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/multi_variables_v1.py | globotree/tensorflow | b944fb947898de8cb4279a5a8a066955ba685412 | [
"Apache-2.0"
] | 3 | 2020-03-09T19:17:02.000Z | 2020-06-26T23:14:31.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/multi_variables_v1 | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
# Verify that the tf.versions attribute exists. It is difficult to enforce
# contents, since the version numbers change over time. The conversion logic
# itself is verified in the common graphdef converter, so here just assert
# it is being invoked.
# CHECK: module
# CHECK-SAME: tf.versions
# CHECK-SAME: bad_consumers
# CHECK-SAME: min_consumer
# CHECK-SAME: producer
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, sym_name = "y", type = tensor<1x3xf32>, value = {{.*}} : tensor<1x3xf32>} : () -> ()
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, sym_name = "z", type = tensor<3x3xf32>, value = {{.*}} : tensor<3x3xf32>} : () -> ()
# CHECK: func @basic([[ARG0:%.*]]: tensor<3x1xf32>,
# CHECK-SAME: [[ARG1:%.*]]: tensor<!tf.resource<tensor<1x3xf32>>> {tf_saved_model.bound_input = @y}
# CHECK-SAME: [[ARG2:%.*]]: tensor<!tf.resource<tensor<3x3xf32>>> {tf_saved_model.bound_input = @z}) -> tensor<3x3xf32>
# CHECK-NEXT: [[R0:%.*]] = "tf.ReadVariableOp"([[ARG1]]) {{{.*}}} : (tensor<!tf.resource<tensor<1x3xf32>>>) -> tensor<1x3xf32>
# CHECK-NEXT: [[R1:%.*]] = "tf.MatMul"([[ARG0]], [[R0]]) {{{.*}}} : (tensor<3x1xf32>, tensor<1x3xf32>) -> tensor<3x3xf32>
# CHECK-NEXT: [[R2:%.*]] = "tf.ReadVariableOp"([[ARG2]]) {{{.*}}} : (tensor<!tf.resource<tensor<3x3xf32>>>) -> tensor<3x3xf32>
# CHECK-NEXT: [[R3:%.*]] = "tf.MatMul"([[R1]], [[R2]]) {{{.*}}} : (tensor<3x3xf32>, tensor<3x3xf32>) -> tensor<3x3xf32>
# CHECK-NEXT: return [[R3]] : tensor<3x3xf32>
def Test():
  """Build a graph with two resource variables ('y', 'z') chained through
  two MatMuls and return the SavedModel signature map consumed by the
  MLIR importer FileCheck test above.

  The statement order below is load-bearing: the CHECK-NEXT assertions
  match the exact op emission order, so do not reorder.
  """
  # Default TF1.x uses reference variables that are not supported by SavedModel
  # v1 Importer. To use SavedModel V1 Importer, resource variables should be
  # enabled.
  tf.compat.v1.enable_resource_variables()
  tf.compat.v1.disable_eager_execution()
  # x is a constant 3x1 input; y (1x3) and z (3x3) become the mutable
  # tf_saved_model.global_tensor values checked above.
  x = tf.constant([[1.0], [1.0], [1.0]])
  y = tf.compat.v1.get_variable(
      name='y',
      shape=(1, 3),
      initializer=tf.random_normal_initializer(),
      trainable=True)
  z = tf.compat.v1.get_variable(
      name='z',
      shape=(3, 3),
      initializer=tf.random_normal_initializer(),
      trainable=True)
  # (3x1)·(1x3) -> 3x3, then (3x3)·(3x3) -> 3x3.
  r = tf.matmul(x, y)
  s = tf.matmul(r, z)
  tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
  tensor_info_s = tf.compat.v1.saved_model.utils.build_tensor_info(s)
  # Single 'basic' signature: input x, output s.
  return {
      'basic':
          (tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
              inputs={'x': tensor_info_x},
              outputs={'s': tensor_info_s},
              method_name=tf.saved_model.PREDICT_METHOD_NAME))
  }
if __name__ == '__main__':
  # common_v1.do_test saves the model and runs it through the MLIR importer.
  common_v1.do_test(Test())
| 41.976471 | 139 | 0.672085 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
def Test():
tf.compat.v1.enable_resource_variables()
tf.compat.v1.disable_eager_execution()
x = tf.constant([[1.0], [1.0], [1.0]])
y = tf.compat.v1.get_variable(
name='y',
shape=(1, 3),
initializer=tf.random_normal_initializer(),
trainable=True)
z = tf.compat.v1.get_variable(
name='z',
shape=(3, 3),
initializer=tf.random_normal_initializer(),
trainable=True)
r = tf.matmul(x, y)
s = tf.matmul(r, z)
tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
tensor_info_s = tf.compat.v1.saved_model.utils.build_tensor_info(s)
return {
'basic':
(tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs={'x': tensor_info_x},
outputs={'s': tensor_info_s},
method_name=tf.saved_model.PREDICT_METHOD_NAME))
}
if __name__ == '__main__':
common_v1.do_test(Test())
| true | true |
f7287953922202a63a987f44cfd22e5f4bd917b7 | 66 | py | Python | lib/__init__.py | linex-cd/puf | 6da93b485b4881c12975d5af1715480a7bffc45c | [
"Apache-2.0"
] | 5 | 2018-01-02T10:27:52.000Z | 2018-05-01T16:01:01.000Z | lib/__init__.py | linex-cd/puf | 6da93b485b4881c12975d5af1715480a7bffc45c | [
"Apache-2.0"
] | null | null | null | lib/__init__.py | linex-cd/puf | 6da93b485b4881c12975d5af1715480a7bffc45c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from . import db;
from . import network;
| 16.5 | 24 | 0.590909 |
from . import db;
from . import network;
| true | true |
f7287956dcc22ab00447e566309832ba2d832ec1 | 3,309 | py | Python | env/lib/python2.7/site-packages/grpc/_plugin_wrapping.py | husky-parul/SheHacks | 19383029947f50ebaf07232c9b2ee76c75d8ada6 | [
"Apache-2.0"
] | 2 | 2018-02-01T06:30:24.000Z | 2018-04-12T15:39:56.000Z | env/lib/python2.7/site-packages/grpc/_plugin_wrapping.py | husky-parul/SheHacks | 19383029947f50ebaf07232c9b2ee76c75d8ada6 | [
"Apache-2.0"
] | null | null | null | env/lib/python2.7/site-packages/grpc/_plugin_wrapping.py | husky-parul/SheHacks | 19383029947f50ebaf07232c9b2ee76c75d8ada6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import threading
import grpc
from grpc import _common
from grpc._cython import cygrpc
class _AuthMetadataContext(
        collections.namedtuple('AuthMetadataContext',
                               ['service_url', 'method_name']),
        grpc.AuthMetadataContext):
    """Immutable (service_url, method_name) pair handed to auth plugins."""
class _CallbackState(object):
def __init__(self):
self.lock = threading.Lock()
self.called = False
self.exception = None
class _AuthMetadataPluginCallback(grpc.AuthMetadataPluginCallback):
    """Callback passed to the user plugin; forwards the result to Cython.

    Enforces the single-invocation contract through a shared _CallbackState.
    """

    def __init__(self, state, callback):
        self._state = state
        self._callback = callback

    def __call__(self, metadata, error):
        with self._state.lock:
            # Fail fast when the plugin already raised, or already called us.
            if self._state.exception is not None:
                raise RuntimeError(
                    'AuthMetadataPluginCallback raised exception "{}"!'.format(
                        self._state.exception))
            if self._state.called:
                raise RuntimeError(
                    'AuthMetadataPluginCallback invoked more than once!')
            self._state.called = True
        if error is None:
            self._callback(metadata, cygrpc.StatusCode.ok, None)
        else:
            self._callback(None, cygrpc.StatusCode.internal,
                           _common.encode(str(error)))
class _Plugin(object):
    """Adapts a user-supplied grpc.AuthMetadataPlugin to the Cython layer."""

    def __init__(self, metadata_plugin):
        self._metadata_plugin = metadata_plugin

    def __call__(self, service_url, method_name, callback):
        context = _AuthMetadataContext(
            _common.decode(service_url), _common.decode(method_name))
        state = _CallbackState()
        try:
            self._metadata_plugin(context,
                                  _AuthMetadataPluginCallback(state, callback))
        except Exception as plugin_error:  # pylint: disable=broad-except
            # Plugins are user code: log the failure, record it, and report
            # an internal error unless the callback already fired.
            logging.exception(
                'AuthMetadataPluginCallback "%s" raised exception!',
                self._metadata_plugin)
            with state.lock:
                state.exception = plugin_error
                if state.called:
                    return
            callback(None, cygrpc.StatusCode.internal,
                     _common.encode(str(plugin_error)))
def metadata_plugin_call_credentials(metadata_plugin, name):
    """Wrap a metadata plugin in grpc.CallCredentials.

    When no explicit name is given, falls back to the plugin's __name__
    and then to its class name.
    """
    if name is not None:
        effective_name = name
    else:
        effective_name = getattr(metadata_plugin, '__name__',
                                 metadata_plugin.__class__.__name__)
    return grpc.CallCredentials(
        cygrpc.MetadataPluginCallCredentials(
            _Plugin(metadata_plugin), _common.encode(effective_name)))
| 33.765306 | 79 | 0.648534 |
import collections
import logging
import threading
import grpc
from grpc import _common
from grpc._cython import cygrpc
class _AuthMetadataContext(
collections.namedtuple('AuthMetadataContext', (
'service_url', 'method_name',)), grpc.AuthMetadataContext):
pass
class _CallbackState(object):
def __init__(self):
self.lock = threading.Lock()
self.called = False
self.exception = None
class _AuthMetadataPluginCallback(grpc.AuthMetadataPluginCallback):
def __init__(self, state, callback):
self._state = state
self._callback = callback
def __call__(self, metadata, error):
with self._state.lock:
if self._state.exception is None:
if self._state.called:
raise RuntimeError(
'AuthMetadataPluginCallback invoked more than once!')
else:
self._state.called = True
else:
raise RuntimeError(
'AuthMetadataPluginCallback raised exception "{}"!'.format(
self._state.exception))
if error is None:
self._callback(metadata, cygrpc.StatusCode.ok, None)
else:
self._callback(None, cygrpc.StatusCode.internal,
_common.encode(str(error)))
class _Plugin(object):
def __init__(self, metadata_plugin):
self._metadata_plugin = metadata_plugin
def __call__(self, service_url, method_name, callback):
context = _AuthMetadataContext(
_common.decode(service_url), _common.decode(method_name))
callback_state = _CallbackState()
try:
self._metadata_plugin(
context, _AuthMetadataPluginCallback(callback_state, callback))
except Exception as exception:
logging.exception(
'AuthMetadataPluginCallback "%s" raised exception!',
self._metadata_plugin)
with callback_state.lock:
callback_state.exception = exception
if callback_state.called:
return
callback(None, cygrpc.StatusCode.internal,
_common.encode(str(exception)))
def metadata_plugin_call_credentials(metadata_plugin, name):
if name is None:
try:
effective_name = metadata_plugin.__name__
except AttributeError:
effective_name = metadata_plugin.__class__.__name__
else:
effective_name = name
return grpc.CallCredentials(
cygrpc.MetadataPluginCallCredentials(
_Plugin(metadata_plugin), _common.encode(effective_name)))
| true | true |
f7287a635c6bc81cd38b424d6f66fce957c23b43 | 571 | py | Python | apphv/core/migrations/0031_auto_20190624_1503.py | FerneyMoreno20/Portfolio | 59eaa4f4f6762386fe84450f65f508be1414f857 | [
"bzip2-1.0.6"
] | null | null | null | apphv/core/migrations/0031_auto_20190624_1503.py | FerneyMoreno20/Portfolio | 59eaa4f4f6762386fe84450f65f508be1414f857 | [
"bzip2-1.0.6"
] | 6 | 2019-12-04T23:34:47.000Z | 2021-06-09T18:01:16.000Z | apphv/core/migrations/0031_auto_20190624_1503.py | FerneyMoreno20/Portfolio | 59eaa4f4f6762386fe84450f65f508be1414f857 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 2.2.2 on 2019-06-24 15:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: narrows Contact.tipom to a fixed set of message
    categories with 'Petición' as the default."""

    dependencies = [
        ('core', '0030_auto_20190624_1458'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contact',
            name='tipom',
            # The choices/default are stored Spanish category labels; they
            # are runtime values and must not be translated.
            field=models.CharField(choices=[('Felicitación', 'Felicitación'), ('Queja', 'Queja'), ('Petición', 'Petición'), ('Solicitud', 'Solicitud'), ('Reclamo', 'Reclamo')], default='Petición', max_length=50, verbose_name='Categoría'),
        ),
    ]
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0030_auto_20190624_1458'),
]
operations = [
migrations.AlterField(
model_name='contact',
name='tipom',
field=models.CharField(choices=[('Felicitación', 'Felicitación'), ('Queja', 'Queja'), ('Petición', 'Petición'), ('Solicitud', 'Solicitud'), ('Reclamo', 'Reclamo')], default='Petición', max_length=50, verbose_name='Categoría'),
),
]
| true | true |
f7287bf8ea5274c9b3d29cb36d1d4400e8ff6f01 | 14,856 | py | Python | pipe-cli/src/utilities/storage/common.py | ZMaratovna/cloud-pipeline | 542b8394f9fade8eb0ef5603568348c3f20a758d | [
"Apache-2.0"
] | 126 | 2019-03-22T19:40:38.000Z | 2022-02-16T13:01:44.000Z | pipe-cli/src/utilities/storage/common.py | ZMaratovna/cloud-pipeline | 542b8394f9fade8eb0ef5603568348c3f20a758d | [
"Apache-2.0"
] | 1,189 | 2019-03-25T10:39:27.000Z | 2022-03-31T12:50:33.000Z | pipe-cli/src/utilities/storage/common.py | ZMaratovna/cloud-pipeline | 542b8394f9fade8eb0ef5603568348c3f20a758d | [
"Apache-2.0"
] | 62 | 2019-03-22T22:09:49.000Z | 2022-03-08T12:05:56.000Z | # Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from abc import abstractmethod, ABCMeta
from collections import namedtuple
import click
import jwt
from src.config import Config
from src.model.data_storage_wrapper_type import WrapperType
TransferResult = namedtuple('TransferResult', ['source_key', 'destination_key', 'destination_version', 'tags'])
UploadResult = namedtuple('UploadResult', ['source_key', 'destination_key', 'destination_version', 'tags'])
class StorageOperations:
    """Stateless helper routines shared by the cloud storage backends.

    Groups path/prefix manipulation, object-tag parsing and generation,
    and small utilities used by the backend-specific list/transfer
    managers.
    """

    PATH_SEPARATOR = '/'
    DEFAULT_PAGE_SIZE = 100
    MAX_TAGS_NUMBER = 10        # hard limit on tags per object
    MAX_KEY_LENGTH = 128
    MAX_VALUE_LENGTH = 256
    TAG_SHORTEN_SUFFIX = '...'  # appended when a tag value is truncated
    # Matches any character that is NOT allowed in a tag value.
    # FIX: raw string — the previous non-raw literal relied on the invalid
    # escape sequences \s, \- and \] (DeprecationWarning since Python 3.6,
    # future SyntaxError). The compiled pattern is byte-identical.
    TAGS_VALIDATION_PATTERN = re.compile(r'[^a-zA-Z0-9\s_.\-@:+/\\]+')
    CP_SOURCE_TAG = 'CP_SOURCE'
    CP_OWNER_TAG = 'CP_OWNER'
    STORAGE_PATH = '%s://%s/%s'
    __config__ = None  # lazily initialized Config singleton

    @classmethod
    def get_proxy_config(cls, target_url=None):
        """Return proxy settings for target_url, or None when no proxy is configured."""
        if cls.__config__ is None:
            cls.__config__ = Config.instance()
        if cls.__config__.proxy is None:
            return None
        else:
            return cls.__config__.resolve_proxy(target_url=target_url)

    @classmethod
    def init_wrapper(cls, wrapper, versioning=False):
        """Detect whether wrapper's remote path exists and whether it is a file.

        Sets wrapper.exists_flag / wrapper.is_file_flag in place and
        returns the wrapper.
        """
        delimiter = StorageOperations.PATH_SEPARATOR
        prefix = StorageOperations.get_prefix(wrapper.path)
        check_file = True
        if prefix.endswith(delimiter):
            # A trailing slash means the caller explicitly asked for a folder.
            prefix = prefix[:-1]
            check_file = False
        listing_manager = wrapper.get_list_manager(show_versions=versioning)
        for item in listing_manager.list_items(prefix, show_all=True):
            if prefix.endswith(item.name.rstrip(delimiter)) and (check_file or item.type == 'Folder'):
                wrapper.exists_flag = True
                wrapper.is_file_flag = item.type == 'File'
                break
        return wrapper

    @classmethod
    def get_prefix(cls, path, delimiter=PATH_SEPARATOR):
        """Return path without a leading delimiter; the delimiter itself for an empty path."""
        if path:
            prefix = path
            if prefix.startswith(delimiter):
                prefix = prefix[1:]
        else:
            prefix = delimiter
        return prefix

    @classmethod
    def get_item_name(cls, path, prefix, delimiter=PATH_SEPARATOR):
        """Return the display name of path relative to prefix.

        Folders keep their trailing separator; a path equal to the prefix
        collapses to its basename.
        """
        # NOTE(review): PATH_SEPARATOR is used here instead of the delimiter
        # parameter; confirm that is intended for non-'/' delimiters.
        possible_folder_name = prefix if prefix.endswith(delimiter) else \
            prefix + StorageOperations.PATH_SEPARATOR
        if prefix and path.startswith(prefix) and path != possible_folder_name and path != prefix:
            if not path == prefix:
                splitted = prefix.split(StorageOperations.PATH_SEPARATOR)
                return splitted[len(splitted) - 1] + path[len(prefix):]
            else:
                return path[len(prefix):]
        elif not path.endswith(StorageOperations.PATH_SEPARATOR) and path == prefix:
            return os.path.basename(path)
        elif path == possible_folder_name:
            return os.path.basename(path.rstrip(StorageOperations.PATH_SEPARATOR)) + StorageOperations.PATH_SEPARATOR
        else:
            return path

    @classmethod
    def normalize_path(cls, destination_wrapper, relative_path, delimiter=PATH_SEPARATOR):
        """Build the destination key for relative_path inside destination_wrapper.

        Appends the relative path when the destination is a folder,
        otherwise keeps the destination's own file path; collapses double
        slashes and strips a leading delimiter.
        """
        if destination_wrapper.path.endswith(delimiter) or not destination_wrapper.is_file():
            if os.path.sep != delimiter:
                # Local OS separators must be converted to the storage delimiter.
                relative_path = relative_path.replace(os.path.sep, delimiter)
            skip_separator = destination_wrapper.path.endswith(delimiter)
            if destination_wrapper.path:
                if skip_separator:
                    destination_key = destination_wrapper.path + relative_path
                else:
                    destination_key = destination_wrapper.path + delimiter + relative_path
            else:
                destination_key = relative_path
        else:
            destination_key = destination_wrapper.path
        result = cls.remove_double_slashes(destination_key)
        if result.startswith(delimiter):
            return result[1:]
        else:
            return result

    @classmethod
    def remove_double_slashes(cls, path, delimiter=PATH_SEPARATOR):
        """Collapse runs of the delimiter into a single occurrence."""
        return re.sub(delimiter + '+', delimiter, path)

    @classmethod
    def show_progress(cls, quiet, size, lock=None):
        """Return True when a progress bar should be displayed (non-quiet, known non-zero size, no lock)."""
        return not quiet and size is not None and size != 0 and lock is None

    @classmethod
    def get_local_file_size(cls, path):
        """Return the size of a local file in bytes, or None when it cannot be read."""
        try:
            return os.path.getsize(path)
        except OSError:
            return None

    @classmethod
    def without_prefix(cls, string, prefix):
        """Return string with prefix removed; None (implicitly) when it does not start with prefix."""
        if string.startswith(prefix):
            return string[len(prefix):]

    @classmethod
    def without_suffix(cls, string, suffix):
        """Return string with suffix removed; None (implicitly) when it does not end with suffix."""
        if string.endswith(suffix):
            return string[:-len(suffix)]

    @classmethod
    def is_relative_path(cls, full_path, prefix, delimiter=PATH_SEPARATOR):
        """Return True when full_path equals prefix or continues with the delimiter after it."""
        # NOTE(review): when full_path does NOT start with prefix,
        # without_prefix returns None and this evaluates True. Callers only
        # pass items listed under prefix, so this never fires in practice —
        # confirm before reusing elsewhere.
        relative_path = StorageOperations.without_prefix(full_path, prefix)
        return not relative_path or relative_path.startswith(delimiter)

    @classmethod
    def parse_tags(cls, tags):
        """Parse 'KEY=VALUE' strings into a dict.

        Enforces MAX_TAGS_NUMBER, skips over-long keys and invalid values
        (warning via click.echo), and truncates over-long values with
        TAG_SHORTEN_SUFFIX.

        :raises ValueError: on too many tags or a tag without '='.
        """
        if not tags:
            return {}
        if len(tags) > cls.MAX_TAGS_NUMBER:
            raise ValueError(
                "Maximum allowed number of tags is {}. Provided {} tags.".format(cls.MAX_TAGS_NUMBER, len(tags)))
        tags_dict = {}
        for tag in tags:
            if "=" not in tag:
                raise ValueError("Tags must be specified as KEY=VALUE pair.")
            parts = tag.split("=", 1)
            key = parts[0]
            if len(key) > cls.MAX_KEY_LENGTH:
                click.echo("Maximum key value is {}. Provided key {}.".format(cls.MAX_KEY_LENGTH, key))
                continue
            value = parts[1]
            value = value.replace('\\', '/')
            if not value or value.isspace() or bool(StorageOperations.TAGS_VALIDATION_PATTERN.search(value)):
                click.echo("The tag value you have provided is invalid: %s. The tag %s will be skipped." % (value, key))
                continue
            if len(value) > cls.MAX_VALUE_LENGTH:
                value = value[:cls.MAX_VALUE_LENGTH - len(cls.TAG_SHORTEN_SUFFIX)] + cls.TAG_SHORTEN_SUFFIX
            tags_dict[key] = value
        return tags_dict

    @classmethod
    def get_user(cls):
        """Return the current user name from the JWT auth token's 'sub' claim."""
        config = Config.instance()
        user_info = jwt.decode(config.get_token(), verify=False)
        if 'sub' in user_info:
            return user_info['sub']
        raise RuntimeError('Cannot find user info.')

    @classmethod
    def generate_tags(cls, raw_tags, source):
        """Parse raw_tags and add the mandatory CP_SOURCE/CP_OWNER tags."""
        tags = StorageOperations.parse_tags(raw_tags)
        tags[StorageOperations.CP_SOURCE_TAG] = source
        tags[StorageOperations.CP_OWNER_TAG] = StorageOperations.get_user()
        return tags

    @classmethod
    def source_tags(cls, tags, source_path, storage_wrapper):
        """Return default CP_SOURCE/CP_OWNER tags for tags that do not already set them."""
        bucket = storage_wrapper.bucket
        default_tags = {}
        if StorageOperations.CP_SOURCE_TAG not in tags:
            scheme = WrapperType.cloud_scheme(bucket.type)
            default_tags[StorageOperations.CP_SOURCE_TAG] = StorageOperations.STORAGE_PATH \
                % (scheme, bucket.name, source_path)
        if StorageOperations.CP_OWNER_TAG not in tags:
            default_tags[StorageOperations.CP_OWNER_TAG] = StorageOperations.get_user()
        return default_tags

    @classmethod
    def get_items(cls, listing_manager, relative_path, delimiter=PATH_SEPARATOR):
        """Yield ('File', full_path, relative_path, size) for every file under relative_path."""
        prefix = StorageOperations.get_prefix(relative_path).rstrip(delimiter)
        for item in listing_manager.list_items(prefix, recursive=True, show_all=True):
            if not StorageOperations.is_relative_path(item.name, prefix):
                continue
            if item.name == relative_path:
                item_relative_path = os.path.basename(item.name)
            else:
                item_relative_path = StorageOperations.get_item_name(item.name, prefix + delimiter)
            yield ('File', item.name, item_relative_path, item.size)

    @classmethod
    def file_is_empty(cls, size):
        """Return True for a missing (None) or zero size."""
        return not size or size == 0
class AbstractTransferManager:
    """Interface for moving data between storage wrappers."""
    __metaclass__ = ABCMeta  # Py2-style ABC declaration (no effect on Py3)

    @abstractmethod
    def get_destination_key(self, destination_wrapper, relative_path):
        """Return the destination key for relative_path in destination_wrapper."""
        pass

    @abstractmethod
    def get_source_key(self, source_wrapper, source_path):
        """Return the source key for source_path in source_wrapper."""
        pass

    @abstractmethod
    def get_destination_size(self, destination_wrapper, destination_key):
        """Return the size of destination_key in destination_wrapper."""
        pass

    @abstractmethod
    def transfer(self, source_wrapper, destination_wrapper, path=None, relative_path=None, clean=False,
                 quiet=False, size=None, tags=(), io_threads=None, lock=None):
        """Transfer data from the source storage to the destination storage.

        :param source_wrapper: source DataStorageWrapper.
        :param destination_wrapper: destination DataStorageWrapper.
        :param path: full path of the data to transfer.
        :param relative_path: relative path of the data to transfer.
        :param clean: remove source files after the transfer.
        :param quiet: suppress progress output.
        :param size: size of the transfer source object.
        :param tags: additional tags for the transferred object; CP_SOURCE
            and CP_OWNER are always included.
        :param io_threads: thread count for single-file IO operations.
        :param lock: multiprocessing.Lock when multithreaded transfer is
            requested.
        """
        pass

    @staticmethod
    def skip_existing(source_key, source_size, destination_key, destination_size, quiet):
        """Return True when the destination already holds an object of the same size."""
        if destination_size is None or destination_size != source_size:
            return False
        if not quiet:
            click.echo('Skipping file %s since it exists in the destination %s' % (source_key, destination_key))
        return True

    @staticmethod
    def create_local_folder(destination_key, lock):
        """Create the parent directory of destination_key, serialized by lock when given."""
        parent_dir = os.path.dirname(destination_key)
        if lock:
            lock.acquire()
        try:
            if parent_dir and not os.path.exists(parent_dir):
                os.makedirs(parent_dir)
        finally:
            if lock:
                lock.release()
class AbstractListingManager:
    """Interface for listing objects and usage statistics in a storage."""
    __metaclass__ = ABCMeta  # Py2-style ABC declaration (no effect on Py3)

    @abstractmethod
    def list_items(self, relative_path=None, recursive=False,
                   page_size=StorageOperations.DEFAULT_PAGE_SIZE, show_all=False):
        """List files and folders under relative_path.

        :param relative_path: storage-relative path to list.
        :param recursive: list recursively.
        :param page_size: max item count; ignored when show_all is set.
        :param show_all: list every item regardless of page_size.
        """
        pass

    @abstractmethod
    def get_summary_with_depth(self, max_depth, relative_path=None):
        """Return a usage-statistics tree down to max_depth levels below relative_path."""
        pass

    @abstractmethod
    def get_summary(self, relative_path=None):
        """Return (storage path, total object count, total size) for relative_path."""
        pass

    def get_items(self, relative_path):
        """Yield ('File', full_path, relative_path, size) for every file under relative_path."""
        prefix = StorageOperations.get_prefix(relative_path).rstrip(StorageOperations.PATH_SEPARATOR)
        for entry in self.list_items(prefix, recursive=True, show_all=True):
            if not StorageOperations.is_relative_path(entry.name, prefix):
                continue
            if entry.name == relative_path:
                entry_rel_path = os.path.basename(entry.name)
            else:
                entry_rel_path = StorageOperations.get_item_name(
                    entry.name, prefix + StorageOperations.PATH_SEPARATOR)
            yield ('File', entry.name, entry_rel_path, entry.size)

    def folder_exists(self, relative_path, delimiter=StorageOperations.PATH_SEPARATOR):
        """Return True when a folder at relative_path exists in the storage."""
        prefix = StorageOperations.get_prefix(relative_path).rstrip(delimiter) + delimiter
        return any(prefix.endswith(entry.name)
                   for entry in self.list_items(prefix, show_all=True))

    @abstractmethod
    def get_file_tags(self, relative_path):
        """Return the tags of the object at relative_path."""
        pass

    def get_file_size(self, relative_path):
        """Return the size of the file at relative_path, or None when absent."""
        entries = self.list_items(relative_path, show_all=True, recursive=True)
        return next((entry.size for entry in entries if entry.name == relative_path), None)
class AbstractDeleteManager:
    # Interface for deleting items from a data storage.
    __metaclass__ = ABCMeta  # Py2-style ABC declaration (no effect on Py3)

    @abstractmethod
    def delete_items(self, relative_path, recursive=False, exclude=[], include=[], version=None, hard_delete=False):
        # NOTE(review): the mutable defaults (exclude=[], include=[]) are an
        # anti-pattern if any implementation mutates them; consider ().
        """
        Deletes all items under the given path.

        :param relative_path: Storage relative path to be deleted.
        :param recursive: Specifies if the deletion has to be recursive. The argument is required for folders deletion.
        :param exclude: Exclude item pattern.
        :param include: Include item pattern.
        :param version: Version to be deleted.
        :param hard_delete: Specifies if all item versions have to be deleted.
        """
        pass
class AbstractRestoreManager:
    # Interface for restoring versioned items in a data storage.
    __metaclass__ = ABCMeta  # Py2-style ABC declaration (no effect on Py3)

    @abstractmethod
    def restore_version(self, version, exclude, include, recursive):
        """
        Restores item version.

        :param version: Version to be restored.
        """
        # NOTE(review): exclude/include/recursive are undocumented here;
        # presumably they mirror AbstractDeleteManager.delete_items —
        # confirm against the concrete implementations.
        pass
| 39.094737 | 122 | 0.655762 |
import os
import re
from abc import abstractmethod, ABCMeta
from collections import namedtuple
import click
import jwt
from src.config import Config
from src.model.data_storage_wrapper_type import WrapperType
TransferResult = namedtuple('TransferResult', ['source_key', 'destination_key', 'destination_version', 'tags'])
UploadResult = namedtuple('UploadResult', ['source_key', 'destination_key', 'destination_version', 'tags'])
class StorageOperations:
PATH_SEPARATOR = '/'
DEFAULT_PAGE_SIZE = 100
MAX_TAGS_NUMBER = 10
MAX_KEY_LENGTH = 128
MAX_VALUE_LENGTH = 256
TAG_SHORTEN_SUFFIX = '...'
TAGS_VALIDATION_PATTERN = re.compile('[^a-zA-Z0-9\s_.\-@:+/\\\]+')
CP_SOURCE_TAG = 'CP_SOURCE'
CP_OWNER_TAG = 'CP_OWNER'
STORAGE_PATH = '%s://%s/%s'
__config__ = None
@classmethod
def get_proxy_config(cls, target_url=None):
if cls.__config__ is None:
cls.__config__ = Config.instance()
if cls.__config__.proxy is None:
return None
else:
return cls.__config__.resolve_proxy(target_url=target_url)
@classmethod
def init_wrapper(cls, wrapper, versioning=False):
delimiter = StorageOperations.PATH_SEPARATOR
prefix = StorageOperations.get_prefix(wrapper.path)
check_file = True
if prefix.endswith(delimiter):
prefix = prefix[:-1]
check_file = False
listing_manager = wrapper.get_list_manager(show_versions=versioning)
for item in listing_manager.list_items(prefix, show_all=True):
if prefix.endswith(item.name.rstrip(delimiter)) and (check_file or item.type == 'Folder'):
wrapper.exists_flag = True
wrapper.is_file_flag = item.type == 'File'
break
return wrapper
@classmethod
def get_prefix(cls, path, delimiter=PATH_SEPARATOR):
if path:
prefix = path
if prefix.startswith(delimiter):
prefix = prefix[1:]
else:
prefix = delimiter
return prefix
@classmethod
def get_item_name(cls, path, prefix, delimiter=PATH_SEPARATOR):
possible_folder_name = prefix if prefix.endswith(delimiter) else \
prefix + StorageOperations.PATH_SEPARATOR
if prefix and path.startswith(prefix) and path != possible_folder_name and path != prefix:
if not path == prefix:
splitted = prefix.split(StorageOperations.PATH_SEPARATOR)
return splitted[len(splitted) - 1] + path[len(prefix):]
else:
return path[len(prefix):]
elif not path.endswith(StorageOperations.PATH_SEPARATOR) and path == prefix:
return os.path.basename(path)
elif path == possible_folder_name:
return os.path.basename(path.rstrip(StorageOperations.PATH_SEPARATOR)) + StorageOperations.PATH_SEPARATOR
else:
return path
@classmethod
def normalize_path(cls, destination_wrapper, relative_path, delimiter=PATH_SEPARATOR):
if destination_wrapper.path.endswith(delimiter) or not destination_wrapper.is_file():
if os.path.sep != delimiter:
relative_path = relative_path.replace(os.path.sep, delimiter)
skip_separator = destination_wrapper.path.endswith(delimiter)
if destination_wrapper.path:
if skip_separator:
destination_key = destination_wrapper.path + relative_path
else:
destination_key = destination_wrapper.path + delimiter + relative_path
else:
destination_key = relative_path
else:
destination_key = destination_wrapper.path
result = cls.remove_double_slashes(destination_key)
if result.startswith(delimiter):
return result[1:]
else:
return result
@classmethod
def remove_double_slashes(cls, path, delimiter=PATH_SEPARATOR):
return re.sub(delimiter + '+', delimiter, path)
@classmethod
def show_progress(cls, quiet, size, lock=None):
return not quiet and size is not None and size != 0 and lock is None
@classmethod
def get_local_file_size(cls, path):
try:
return os.path.getsize(path)
except OSError:
return None
@classmethod
def without_prefix(cls, string, prefix):
if string.startswith(prefix):
return string[len(prefix):]
@classmethod
def without_suffix(cls, string, suffix):
if string.endswith(suffix):
return string[:-len(suffix)]
@classmethod
def is_relative_path(cls, full_path, prefix, delimiter=PATH_SEPARATOR):
relative_path = StorageOperations.without_prefix(full_path, prefix)
return not relative_path or relative_path.startswith(delimiter)
@classmethod
def parse_tags(cls, tags):
if not tags:
return {}
if len(tags) > cls.MAX_TAGS_NUMBER:
raise ValueError(
"Maximum allowed number of tags is {}. Provided {} tags.".format(cls.MAX_TAGS_NUMBER, len(tags)))
tags_dict = {}
for tag in tags:
if "=" not in tag:
raise ValueError("Tags must be specified as KEY=VALUE pair.")
parts = tag.split("=", 1)
key = parts[0]
if len(key) > cls.MAX_KEY_LENGTH:
click.echo("Maximum key value is {}. Provided key {}.".format(cls.MAX_KEY_LENGTH, key))
continue
value = parts[1]
value = value.replace('\\', '/')
if not value or value.isspace() or bool(StorageOperations.TAGS_VALIDATION_PATTERN.search(value)):
click.echo("The tag value you have provided is invalid: %s. The tag %s will be skipped." % (value, key))
continue
if len(value) > cls.MAX_VALUE_LENGTH:
value = value[:cls.MAX_VALUE_LENGTH - len(cls.TAG_SHORTEN_SUFFIX)] + cls.TAG_SHORTEN_SUFFIX
tags_dict[key] = value
return tags_dict
@classmethod
def get_user(cls):
    """User name (the JWT 'sub' claim) taken from the CLI auth token.

    Raises RuntimeError when the token carries no subject claim.
    """
    token = Config.instance().get_token()
    claims = jwt.decode(token, verify=False)
    if 'sub' not in claims:
        raise RuntimeError('Cannot find user info.')
    return claims['sub']
@classmethod
def generate_tags(cls, raw_tags, source):
    """Parsed user tags plus the bookkeeping CP_SOURCE/CP_OWNER tags."""
    tags = StorageOperations.parse_tags(raw_tags)
    tags[StorageOperations.CP_SOURCE_TAG] = source
    owner = StorageOperations.get_user()
    tags[StorageOperations.CP_OWNER_TAG] = owner
    return tags
@classmethod
def source_tags(cls, tags, source_path, storage_wrapper):
    """Default CP_SOURCE/CP_OWNER tags for keys missing from *tags*."""
    defaults = {}
    bucket = storage_wrapper.bucket
    if StorageOperations.CP_SOURCE_TAG not in tags:
        scheme = WrapperType.cloud_scheme(bucket.type)
        source = StorageOperations.STORAGE_PATH % (scheme, bucket.name, source_path)
        defaults[StorageOperations.CP_SOURCE_TAG] = source
    if StorageOperations.CP_OWNER_TAG not in tags:
        defaults[StorageOperations.CP_OWNER_TAG] = StorageOperations.get_user()
    return defaults
@classmethod
def get_items(cls, listing_manager, relative_path, delimiter=PATH_SEPARATOR):
    """Yield ('File', full name, listing-relative name, size) tuples.

    Recursively lists everything under *relative_path*, keeping only items
    that are truly relative to the stripped prefix.
    """
    prefix = StorageOperations.get_prefix(relative_path).rstrip(delimiter)
    for item in listing_manager.list_items(prefix, recursive=True, show_all=True):
        if not StorageOperations.is_relative_path(item.name, prefix):
            continue
        if item.name == relative_path:
            # Exact match: show just the basename.
            shown_name = os.path.basename(item.name)
        else:
            shown_name = StorageOperations.get_item_name(item.name, prefix + delimiter)
        yield ('File', item.name, shown_name, item.size)
@classmethod
def file_is_empty(cls, size):
    """True when *size* is falsy (0 or None) — i.e. nothing to transfer.

    The original expression ``not size or size == 0`` was redundant:
    ``size == 0`` can only decide the result when ``not size`` is already
    True, so the second clause never changed the outcome.
    """
    return not size
class AbstractTransferManager:
    """Interface for copying items between storage wrappers."""
    # Python-2 style ABC declaration; preserved as-is (ignored on Python 3).
    __metaclass__ = ABCMeta

    @abstractmethod
    def get_destination_key(self, destination_wrapper, relative_path):
        """Destination key for *relative_path* inside *destination_wrapper*."""
        pass

    @abstractmethod
    def get_source_key(self, source_wrapper, source_path):
        """Source key for *source_path* inside *source_wrapper*."""
        pass

    @abstractmethod
    def get_destination_size(self, destination_wrapper, destination_key):
        """Size of *destination_key* when it already exists."""
        pass

    @abstractmethod
    def transfer(self, source_wrapper, destination_wrapper, path=None, relative_path=None, clean=False,
                 quiet=False, size=None, tags=(), io_threads=None, lock=None):
        """Copy a single item between the wrappers."""
        pass

    @staticmethod
    def skip_existing(source_key, source_size, destination_key, destination_size, quiet):
        """Return True when the destination already holds a same-sized copy."""
        already_present = destination_size is not None and destination_size == source_size
        if not already_present:
            return False
        if not quiet:
            click.echo('Skipping file %s since it exists in the destination %s' % (source_key, destination_key))
        return True

    @staticmethod
    def create_local_folder(destination_key, lock):
        """Ensure the parent directory of *destination_key* exists, under *lock*."""
        folder = os.path.dirname(destination_key)
        if lock:
            lock.acquire()
        try:
            if folder and not os.path.exists(folder):
                os.makedirs(folder)
        finally:
            if lock:
                lock.release()
class AbstractListingManager:
    """Interface for listing the contents of a storage backend."""
    # Python-2 style ABC declaration; preserved as-is (ignored on Python 3).
    __metaclass__ = ABCMeta

    @abstractmethod
    def list_items(self, relative_path=None, recursive=False, page_size=StorageOperations.DEFAULT_PAGE_SIZE,
                   show_all=False):
        """Return/yield the items found under *relative_path*."""
        pass

    @abstractmethod
    def get_summary_with_depth(self, max_depth, relative_path=None):
        """Storage summary aggregated down to *max_depth* levels."""
        pass

    @abstractmethod
    def get_summary(self, relative_path=None):
        """Overall storage summary for *relative_path*."""
        pass

    def get_items(self, relative_path):
        """Yield ('File', full name, listing-relative name, size) tuples."""
        separator = StorageOperations.PATH_SEPARATOR
        prefix = StorageOperations.get_prefix(relative_path).rstrip(separator)
        for item in self.list_items(prefix, recursive=True, show_all=True):
            if not StorageOperations.is_relative_path(item.name, prefix):
                continue
            if item.name == relative_path:
                shown_name = os.path.basename(item.name)
            else:
                shown_name = StorageOperations.get_item_name(item.name, prefix + separator)
            yield ('File', item.name, shown_name, item.size)

    def folder_exists(self, relative_path, delimiter=StorageOperations.PATH_SEPARATOR):
        """True when the listing contains an entry matching the folder prefix."""
        prefix = StorageOperations.get_prefix(relative_path).rstrip(delimiter) + delimiter
        return any(prefix.endswith(item.name) for item in self.list_items(prefix, show_all=True))

    @abstractmethod
    def get_file_tags(self, relative_path):
        """Tags attached to the file at *relative_path*."""
        pass

    def get_file_size(self, relative_path):
        """Size of the item named exactly *relative_path*, or None if absent."""
        items = self.list_items(relative_path, show_all=True, recursive=True)
        return next((item.size for item in items if item.name == relative_path), None)
class AbstractDeleteManager:
    """Interface for deleting items from a storage backend."""
    # Python-2 style ABC declaration; preserved as-is (ignored on Python 3).
    __metaclass__ = ABCMeta

    @abstractmethod
    def delete_items(self, relative_path, recursive=False, exclude=[], include=[], version=None, hard_delete=False):
        """Delete *relative_path*, optionally recursively and/or permanently.

        Note: the mutable list defaults are never mutated here, so they are
        safe despite the usual Python caveat.
        """
        pass
class AbstractRestoreManager:
    """Interface for restoring earlier versions of stored items."""
    # Python-2 style ABC declaration; preserved as-is (ignored on Python 3).
    __metaclass__ = ABCMeta

    @abstractmethod
    def restore_version(self, version, exclude, include, recursive):
        """Restore the given *version*, honouring the include/exclude filters."""
        pass
| true | true |
f7287cbbee5f2c74133a1acb51112b32a015c354 | 4,520 | py | Python | t5x/losses_test.py | shpotes/s4x | d14be41ea8994c36fb75801a12837c1d3e77cb57 | [
"Apache-2.0"
] | 278 | 2021-11-03T20:24:32.000Z | 2022-03-29T23:44:06.000Z | t5x/losses_test.py | justinphan3110/t5x | 83151b8a7cfc78ebfc1d87ccc5109b6a0444a5e5 | [
"Apache-2.0"
] | 31 | 2021-11-04T02:10:31.000Z | 2022-03-29T19:03:49.000Z | t5x/losses_test.py | justinphan3110/t5x | 83151b8a7cfc78ebfc1d87ccc5109b6a0444a5e5 | [
"Apache-2.0"
] | 26 | 2021-11-03T23:26:19.000Z | 2022-03-29T00:18:51.000Z | # Copyright 2022 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t5x.losses."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
import numpy as np
from t5x import losses
class LossTest(absltest.TestCase):
  """Smoke tests for t5x.losses cross-entropy."""

  def test_xent(self):
    """Runs the loss and its gradient end to end under jax.jit."""

    def lossfn(logits, targets, weights):
      loss, z_loss, weight_sum = losses.compute_weighted_cross_entropy(
          logits,
          targets,
          weights,
          label_smoothing=0.1,
          z_loss=0.1,
          loss_normalizing_factor=0.1)
      return loss, (z_loss, weight_sum)

    batch_size, length, vocab_size = 2, 4, 8
    logits = np.random.normal(size=(batch_size, length,
                                    vocab_size)).astype(np.float32)
    targets = np.random.randint(0, vocab_size, size=(batch_size, length))
    weights = np.ones_like(targets)
    grad_fn = jax.jit(jax.value_and_grad(lossfn, has_aux=True))
    (loss, (z_loss, weight_sum)), dlogits = grad_fn(logits, targets, weights)
    # Just a smoke test for now
    # TODO(t5x): Expand test
    print(jax.device_get(((loss, (z_loss, weight_sum)), dlogits)))
class SpecialLossNormalizingFactorTest(absltest.TestCase):
  """Tests for the SpecialLossNormalizingFactor loss-weighting helpers."""

  def test_num_real_target_tokens(self):
    """Factor counts non-padding (non-zero) target tokens: 4 + 2 = 6."""
    batch = {
        'decoder_target_tokens':
            jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32)
    }
    (output_lnf,
     output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(
         loss_normalizing_factor=losses.SpecialLossNormalizingFactor
         .NUM_REAL_TARGET_TOKENS,
         batch=batch)
    np.testing.assert_allclose(output_lnf, 6.0, rtol=1e-3)
    # Weights are 1 on real tokens, 0 on padding.
    np.testing.assert_allclose(
        output_loss_weights,
        np.array([[1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0]],
                 dtype=np.float32),
        rtol=1e-3)

  def test_num_total_target_tokens(self):
    """Factor counts all positions including padding: 2 * 5 = 10."""
    batch = {
        'decoder_target_tokens':
            jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32)
    }
    (output_lnf,
     output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(
         loss_normalizing_factor=losses.SpecialLossNormalizingFactor
         .NUM_TOTAL_TARGET_TOKENS,
         batch=batch)
    np.testing.assert_allclose(output_lnf, 10.0, rtol=1e-3)
    np.testing.assert_allclose(
        output_loss_weights,
        np.array([[1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0]],
                 dtype=np.float32),
        rtol=1e-3)

  def test_average_per_sequence(self):
    """Factor is the sequence count; weights normalized per sequence."""
    batch = {
        'decoder_target_tokens':
            jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32)
    }
    (output_lnf,
     output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(
         loss_normalizing_factor=losses.SpecialLossNormalizingFactor
         .AVERAGE_PER_SEQUENCE,
         batch=batch)
    np.testing.assert_allclose(output_lnf, 2.0, rtol=1e-3)
    np.testing.assert_allclose(
        output_loss_weights,
        jnp.asarray([[0.25, 0.25, 0.25, 0.25, 0.0], [0.5, 0.5, 0.0, 0.0, 0.0]],
                    jnp.float32),
        rtol=1e-3)

  def test_average_per_sequence_with_weights(self):
    """Per-sequence normalization also rescales user-provided loss weights."""
    batch = {
        'decoder_target_tokens':
            jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32),
        'decoder_loss_weights':
            jnp.asarray([[0.5, 1.0, 0.25, 2.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0]],
                        jnp.float32)
    }
    (output_lnf,
     output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(
         loss_normalizing_factor=losses.SpecialLossNormalizingFactor
         .AVERAGE_PER_SEQUENCE,
         batch=batch)
    np.testing.assert_allclose(output_lnf, 2.0, rtol=1e-3)
    np.testing.assert_allclose(
        output_loss_weights,
        jnp.asarray(
            [[0.1333, 0.2666, 0.0666, 0.5333, 0.0], [0.5, 0.5, 0.0, 0.0, 0.0]],
            jnp.float32),
        rtol=1e-3)
# Allow running this test module directly.
if __name__ == '__main__':
  absltest.main()
| 32.992701 | 80 | 0.625885 |
from absl.testing import absltest
import jax
import jax.numpy as jnp
import numpy as np
from t5x import losses
class LossTest(absltest.TestCase):
  """Smoke test for losses.compute_weighted_cross_entropy."""

  def test_xent(self):
    """Runs the loss and its gradient end-to-end under jax.jit."""

    def lossfn(logits, targets, weights):
      loss, z_loss, weight_sum = losses.compute_weighted_cross_entropy(
          logits,
          targets,
          weights,
          label_smoothing=0.1,
          z_loss=0.1,
          loss_normalizing_factor=0.1)
      return loss, (z_loss, weight_sum)

    batch_size = 2
    length = 4
    vocab_size = 8
    logits = np.random.normal(size=(batch_size, length,
                                    vocab_size)).astype(np.float32)
    targets = np.random.randint(0, vocab_size, size=(batch_size, length))
    weights = np.ones_like(targets)
    out = jax.jit(jax.value_and_grad(lossfn, has_aux=True))(logits, targets,
                                                            weights)
    (loss, (z_loss, weight_sum)), dlogits = out
    # Smoke test only: materialize outputs to force the computation.
    print(jax.device_get(((loss, (z_loss, weight_sum)), dlogits)))
class SpecialLossNormalizingFactorTest(absltest.TestCase):
  """Tests for the SpecialLossNormalizingFactor loss-weighting helpers."""

  def test_num_real_target_tokens(self):
    """Factor counts non-padding (non-zero) target tokens: 4 + 2 = 6."""
    batch = {
        'decoder_target_tokens':
            jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32)
    }
    (output_lnf,
     output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(
         loss_normalizing_factor=losses.SpecialLossNormalizingFactor
         .NUM_REAL_TARGET_TOKENS,
         batch=batch)
    np.testing.assert_allclose(output_lnf, 6.0, rtol=1e-3)
    np.testing.assert_allclose(
        output_loss_weights,
        np.array([[1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0]],
                 dtype=np.float32),
        rtol=1e-3)

  def test_num_total_target_tokens(self):
    """Factor counts all positions including padding: 2 * 5 = 10."""
    batch = {
        'decoder_target_tokens':
            jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32)
    }
    (output_lnf,
     output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(
         loss_normalizing_factor=losses.SpecialLossNormalizingFactor
         .NUM_TOTAL_TARGET_TOKENS,
         batch=batch)
    np.testing.assert_allclose(output_lnf, 10.0, rtol=1e-3)
    np.testing.assert_allclose(
        output_loss_weights,
        np.array([[1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0]],
                 dtype=np.float32),
        rtol=1e-3)

  def test_average_per_sequence(self):
    """Factor is the sequence count; weights normalized per sequence."""
    batch = {
        'decoder_target_tokens':
            jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32)
    }
    (output_lnf,
     output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(
         loss_normalizing_factor=losses.SpecialLossNormalizingFactor
         .AVERAGE_PER_SEQUENCE,
         batch=batch)
    np.testing.assert_allclose(output_lnf, 2.0, rtol=1e-3)
    np.testing.assert_allclose(
        output_loss_weights,
        jnp.asarray([[0.25, 0.25, 0.25, 0.25, 0.0], [0.5, 0.5, 0.0, 0.0, 0.0]],
                    jnp.float32),
        rtol=1e-3)

  def test_average_per_sequence_with_weights(self):
    """Per-sequence normalization also rescales user-provided loss weights."""
    batch = {
        'decoder_target_tokens':
            jnp.asarray([[1, 2, 3, 4, 0], [5, 6, 0, 0, 0]], jnp.int32),
        'decoder_loss_weights':
            jnp.asarray([[0.5, 1.0, 0.25, 2.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0]],
                        jnp.float32)
    }
    (output_lnf,
     output_loss_weights) = losses.get_loss_normalizing_factor_and_weights(
         loss_normalizing_factor=losses.SpecialLossNormalizingFactor
         .AVERAGE_PER_SEQUENCE,
         batch=batch)
    np.testing.assert_allclose(output_lnf, 2.0, rtol=1e-3)
    np.testing.assert_allclose(
        output_loss_weights,
        jnp.asarray(
            [[0.1333, 0.2666, 0.0666, 0.5333, 0.0], [0.5, 0.5, 0.0, 0.0, 0.0]],
            jnp.float32),
        rtol=1e-3)
# Allow running this test module directly.
if __name__ == '__main__':
  absltest.main()
| true | true |
f7287d50f20741fea305f41fd56b283240ba004b | 1,338 | py | Python | Data Analytics/Utilities/DA_tools.py | rafalmularczyk/public_lectures | fcd10c217f56021ebdec0046dfe0def7f31e9b0c | [
"CC-BY-4.0"
] | null | null | null | Data Analytics/Utilities/DA_tools.py | rafalmularczyk/public_lectures | fcd10c217f56021ebdec0046dfe0def7f31e9b0c | [
"CC-BY-4.0"
] | null | null | null | Data Analytics/Utilities/DA_tools.py | rafalmularczyk/public_lectures | fcd10c217f56021ebdec0046dfe0def7f31e9b0c | [
"CC-BY-4.0"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
# Color palette used by ribbon_plot: graded yellows from lightest band to
# darkest (median line), plus auxiliary green/grey.
light="#FFFCDC"
light_highlight="#FEF590"
mid="#FDED2A"
mid_highlight="#f0dc05"
dark="#EECA02"
dark_highlight="#BB9700"
green="#00FF00"
light_grey="#DDDDDD"
def is_sorted(a):
    """Check whether a 1-D numpy array is in non-decreasing order."""
    head, tail = a[:-1], a[1:]
    return np.all(head <= tail)
def ribbon_plot(x, fx, ax=None, zorder=0):
    """Plot a ribbon plot for regression and similar.

    Draws the 10%-step quantile bands of the variate *fx* (shape (N, n))
    against the covariate *x* (shape (n,)), plus the median line.
    Returns the matplotlib axes used.
    """
    if ax is None:
        ax = plt.gca()
    if not is_sorted(x):
        print('Sorting')
        # Sort by the covariate and carry every sample row along with it.
        stacked = np.concatenate([np.expand_dims(x, axis=0), fx], axis=0)
        stacked = stacked[:, stacked[0].argsort()]
        x = stacked[0, :]
        fx = stacked[1:, :]
    deciles = np.percentile(fx, [10, 20, 30, 40, 50, 60, 70, 80, 90], axis=0)
    # Nested bands: 10-90, 20-80, 30-70, 40-60, then the median on top.
    bands = [(0, 8, light), (1, 7, light_highlight), (2, 6, mid), (3, 5, mid_highlight)]
    for low, high, shade in bands:
        ax.fill_between(x, deciles[low, :], deciles[high, :], color=shade, zorder=zorder)
    ax.plot(x, deciles[4, :], color=dark, zorder=zorder)
    return ax
| 33.45 | 92 | 0.650972 | import matplotlib.pyplot as plt
import numpy as np
# Color palette used by ribbon_plot: graded yellows from lightest band to
# darkest (median line), plus auxiliary green/grey.
light="#FFFCDC"
light_highlight="#FEF590"
mid="#FDED2A"
mid_highlight="#f0dc05"
dark="#EECA02"
dark_highlight="#BB9700"
green="#00FF00"
light_grey="#DDDDDD"
def is_sorted(a):
    """Check if a 1-D numpy array is in non-decreasing order."""
    return np.all(a[:-1] <= a[1:])
def ribbon_plot(x, fx, ax=None,zorder=0):
    """Plot a ribbon plot: 10%-step quantile bands of fx (shape (N, n))
    as a function of the covariate x (shape (n,)), plus the median line.
    Returns the matplotlib axes used.
    """
    if ax is None:
        ax = plt.gca()
    if not is_sorted(x):
        print('Sorting')
        # Sort by the covariate and carry every sample row along with it.
        arr2D = np.concatenate([np.expand_dims(x,axis=0),fx],axis=0)
        sortedArr = arr2D [ :, arr2D[0].argsort()]
        x = sortedArr[0,:]
        fx = sortedArr[1:,:]
    probs = [10, 20, 30, 40, 50, 60, 70, 80, 90]
    perc_interv=np.percentile(fx, probs, axis=0)
    # Nested bands: 10-90, 20-80, 30-70, 40-60, then the median on top.
    ax.fill_between(x,perc_interv[0,:],perc_interv[8,:],color=light,zorder=zorder)
    ax.fill_between(x,perc_interv[1,:],perc_interv[7,:],color=light_highlight,zorder=zorder)
    ax.fill_between(x,perc_interv[2,:],perc_interv[6,:],color=mid,zorder=zorder)
    ax.fill_between(x,perc_interv[3,:],perc_interv[5,:],color=mid_highlight,zorder=zorder)
    ax.plot(x,perc_interv[4,:],color=dark,zorder=zorder)
    return(ax)
| true | true |
f7287f2e4a2f658ca902bcac9869b2dbf4f7b77c | 823 | py | Python | carbon0/carbon_quiz/migrations/0040_auto_20200921_1214.py | Carbon0-Games/carbon0-web-app | 068a7223b2717d602944ec561adcde39930cba85 | [
"MIT"
] | 2 | 2020-10-30T15:07:28.000Z | 2020-12-22T04:29:50.000Z | carbon0/carbon_quiz/migrations/0040_auto_20200921_1214.py | Carbon0-Games/carbon0-web-app | 068a7223b2717d602944ec561adcde39930cba85 | [
"MIT"
] | 45 | 2020-09-22T12:47:55.000Z | 2022-03-12T00:48:18.000Z | carbon0/carbon_quiz/migrations/0040_auto_20200921_1214.py | Carbon0-Games/carbon0-web-app | 068a7223b2717d602944ec561adcde39930cba85 | [
"MIT"
] | 1 | 2020-09-08T15:48:13.000Z | 2020-09-08T15:48:13.000Z | # Generated by Django 3.1.1 on 2020-09-21 16:14
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: alters Mission.links to a nested
    ArrayField (3 outer x 2 inner) of optional CharFields.  Avoid hand
    edits beyond comments."""

    dependencies = [
        ("carbon_quiz", "0039_auto_20200921_1212"),
    ]

    operations = [
        migrations.AlterField(
            model_name="mission",
            name="links",
            field=django.contrib.postgres.fields.ArrayField(
                base_field=django.contrib.postgres.fields.ArrayField(
                    base_field=models.CharField(blank=True, max_length=100, null=True),
                    help_text="Links the user can click to complete the mission.",
                    null=True,
                    size=2,
                ),
                null=True,
                size=3,
            ),
        ),
    ]
| 28.37931 | 87 | 0.552855 |
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: alters Mission.links to a nested
    ArrayField (3 outer x 2 inner) of optional CharFields.  Avoid hand
    edits beyond comments."""

    dependencies = [
        ("carbon_quiz", "0039_auto_20200921_1212"),
    ]

    operations = [
        migrations.AlterField(
            model_name="mission",
            name="links",
            field=django.contrib.postgres.fields.ArrayField(
                base_field=django.contrib.postgres.fields.ArrayField(
                    base_field=models.CharField(blank=True, max_length=100, null=True),
                    help_text="Links the user can click to complete the mission.",
                    null=True,
                    size=2,
                ),
                null=True,
                size=3,
            ),
        ),
    ]
| true | true |
f7287fa1facf82d928321d46a3d03a58682dac74 | 361 | py | Python | pyservertech/auth.py | netmanchris/pyservertech | 2dad5a3e1754c22bc20042cd5df629eb38f72b06 | [
"Apache-2.0"
] | null | null | null | pyservertech/auth.py | netmanchris/pyservertech | 2dad5a3e1754c22bc20042cd5df629eb38f72b06 | [
"Apache-2.0"
] | null | null | null | pyservertech/auth.py | netmanchris/pyservertech | 2dad5a3e1754c22bc20042cd5df629eb38f72b06 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# coding=utf-8
# author: @netmanchris
# -*- coding: utf-8 -*-
"""
This module contains functions for authenticating to the ServerTech
"""
class STAuth:
    """Holds SNMP connection/authentication parameters for a ServerTech device.

    Attributes mirror the constructor arguments: device IP address, the
    read-only and read-write community strings, and the SNMP port
    (default "161").
    """

    def __init__(self, ipaddr, rostring, rwstring, Port="161"):
        self.ipaddr = ipaddr
        self.rostring = rostring
        self.rwstring = rwstring
        # Fix: stray extraction residue fused onto this assignment removed.
        self.port = Port
class STAuth:
    """Holds SNMP connection/authentication parameters for a ServerTech device.

    Attributes mirror the constructor arguments: device IP address, the
    read-only and read-write community strings, and the SNMP port
    (default "161").
    """

    def __init__(self, ipaddr, rostring, rwstring, Port="161"):
        self.ipaddr = ipaddr
        self.rostring = rostring
        self.rwstring = rwstring
        # Fix: stray extraction residue fused onto this assignment removed.
        self.port = Port
f7287fd13c2ce1f49f02eb3a7afa4bd9d575fbea | 2,019 | py | Python | tests/test_api_status.py | max-arnold/python-block-disposable-email | 1cbfdfa9c3b3d84cb158fe079a07eb9427f97c6d | [
"BSD-3-Clause"
] | null | null | null | tests/test_api_status.py | max-arnold/python-block-disposable-email | 1cbfdfa9c3b3d84cb158fe079a07eb9427f97c6d | [
"BSD-3-Clause"
] | 6 | 2020-03-07T14:36:22.000Z | 2022-02-10T09:37:14.000Z | tests/test_api_status.py | max-arnold/python-block-disposable-email | 1cbfdfa9c3b3d84cb158fe079a07eb9427f97c6d | [
"BSD-3-Clause"
] | 1 | 2020-03-08T13:56:49.000Z | 2020-03-08T13:56:49.000Z | # -*- coding: utf-8 -*-
from bdea.client import BDEAStatusResponse
class TestBDEAStatusResponse(object):
    """Unit tests for BDEAStatusResponse parsing of API status payloads."""

    # Canonical status payload; individual tests override selected fields.
    RESPONSE = {
        'apikeystatus': 'active',
        'commercial_credit_status': 'exhausted',
        'commercial_credit_status_percent': 0,
        'credits': '0',
        'credits_time': '2015-10-24 13:15:08',
        'request_status': 'ok',
        'servertime': '2015-10-24 13:38:38',
        'version': '1.3'
    }

    def test_empty_response_is_not_valid(self):
        """An empty payload must report an invalid status."""
        assert BDEAStatusResponse({}).status() == False

    def test_empty_response_means_zero_credits(self):
        """An empty payload carries no credits."""
        assert BDEAStatusResponse({}).credits() == 0

    def test_empty_response_means_exausted_credits(self):
        """An empty payload defaults to an exhausted credit status."""
        assert BDEAStatusResponse({}).credit_status() == 'exhausted'

    def test_request_status_and_apikey_status(self):
        """status() is True only when request is 'ok' AND key is 'active'."""
        cases = [
            ('ok', 'active', True),
            ('ok', 'inactive', False),
            ('fail', 'active', False),
        ]
        for request_status, apikey_status, expected in cases:
            payload = dict(self.RESPONSE,
                           request_status=request_status,
                           apikeystatus=apikey_status)
            assert BDEAStatusResponse(payload).status() == expected

    def test_credit_status(self):
        """credit_status() echoes the commercial_credit_status field."""
        for ccs in ('good', 'low', 'exhausted'):
            payload = dict(self.RESPONSE,
                           request_status='ok',
                           apikeystatus='active',
                           commercial_credit_status=ccs)
            assert BDEAStatusResponse(payload).credit_status() == ccs

    def test_credits(self):
        """credits() converts the string credit count to an int."""
        payload = dict(self.RESPONSE,
                       request_status='ok',
                       apikeystatus='active',
                       credits='100')
        assert BDEAStatusResponse(payload).credits() == 100
| 29.691176 | 65 | 0.563645 |
from bdea.client import BDEAStatusResponse
class TestBDEAStatusResponse(object):
    """Unit tests for BDEAStatusResponse parsing of API status payloads."""

    # Canonical status payload; individual tests override selected fields.
    RESPONSE = {
        'apikeystatus': 'active',
        'commercial_credit_status': 'exhausted',
        'commercial_credit_status_percent': 0,
        'credits': '0',
        'credits_time': '2015-10-24 13:15:08',
        'request_status': 'ok',
        'servertime': '2015-10-24 13:38:38',
        'version': '1.3'
    }

    def test_empty_response_is_not_valid(self):
        """An empty payload must report an invalid status."""
        res = BDEAStatusResponse({})
        assert res.status() == False

    def test_empty_response_means_zero_credits(self):
        """An empty payload carries no credits."""
        res = BDEAStatusResponse({})
        assert res.credits() == 0

    def test_empty_response_means_exausted_credits(self):
        """An empty payload defaults to an exhausted credit status."""
        res = BDEAStatusResponse({})
        assert res.credit_status() == 'exhausted'

    def test_request_status_and_apikey_status(self):
        """status() is True only when request is 'ok' AND key is 'active'."""
        res = self.RESPONSE.copy()
        res.update({
            'request_status': 'ok',
            'apikeystatus': 'active'
        })
        assert BDEAStatusResponse(res).status() == True
        res.update({
            'request_status': 'ok',
            'apikeystatus': 'inactive'
        })
        assert BDEAStatusResponse(res).status() == False
        res.update({
            'request_status': 'fail',
            'apikeystatus': 'active'
        })
        assert BDEAStatusResponse(res).status() == False

    def test_credit_status(self):
        """credit_status() echoes the commercial_credit_status field."""
        res = self.RESPONSE.copy()
        for ccs in ('good', 'low', 'exhausted'):
            res.update({
                'request_status': 'ok',
                'apikeystatus': 'active',
                'commercial_credit_status': ccs
            })
            assert BDEAStatusResponse(res).credit_status() == ccs

    def test_credits(self):
        """credits() converts the string credit count to an int."""
        res = self.RESPONSE.copy()
        res.update({
            'request_status': 'ok',
            'apikeystatus': 'active',
            'credits': '100'
        })
        assert BDEAStatusResponse(res).credits() == 100
| true | true |
f728809195d5d918917fedc3e32c19868a77fc1c | 80,708 | py | Python | tefla/core/losses.py | mkulariya1/tefla | 8de25c1b67dcf025535f5e8c40539de59acd7fb8 | [
"MIT"
] | null | null | null | tefla/core/losses.py | mkulariya1/tefla | 8de25c1b67dcf025535f5e8c40539de59acd7fb8 | [
"MIT"
] | null | null | null | tefla/core/losses.py | mkulariya1/tefla | 8de25c1b67dcf025535f5e8c40539de59acd7fb8 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------#
# Written by Mrinal Haloi
# Contact: mrinal.haloi11@gmail.com
# Copyright 2016, Mrinal Haloi
# -------------------------------------------------------------------#
import numpy as np
import tensorflow as tf
import numbers
from functools import partial
from ..utils import util
from .layers import flatten, fully_connected as fc, relu
from .layers import gradient_reverse
from ..utils import losses_utils
log_loss = tf.losses.log_loss
def log_loss_custom(predictions, labels, eps=1e-7, name='log'):
  """Mean cross-entropy (log) loss with prediction clipping.

  Args:
    predictions: 2D tensor/array [batch_size, num_classes], network outputs.
    labels: 2D tensor/array [batch_size, num_classes], ground-truth targets.
    eps: clipping constant that keeps predictions away from 0 and 1.
    name: optional name scope.

  Returns:
    A scalar tensor with the log loss.
  """
  with tf.name_scope(name):
    preds = tf.clip_by_value(tf.to_float(predictions), eps, 1 - eps)
    targets = tf.to_float(labels)
    preds.get_shape().assert_is_compatible_with(targets.get_shape())
    return -tf.reduce_mean(targets * tf.log(preds))
def log_loss_tf(predictions, labels, eps=1e-7, weights=1.0, name='log_loss'):
  """Binary cross-entropy with an epsilon guard, reduced as a weighted loss.

  Args:
    predictions: 2D tensor/array [batch_size, num_classes], network outputs.
    labels: 2D tensor/array [batch_size, num_classes], ground-truth targets.
    eps: small constant added inside the logs to avoid log(0).
    weights: scalar or tensor weighting passed to compute_weighted_loss.
    name: optional name scope.

  Returns:
    A scalar tensor with the weighted log loss.
  """
  with tf.name_scope(name):
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    preds = tf.to_float(predictions)
    targets = tf.to_float(labels)
    positive_term = tf.multiply(targets, tf.log(preds + eps))
    negative_term = tf.multiply(1 - targets, tf.log(1 - preds + eps))
    return tf.losses.compute_weighted_loss(-positive_term - negative_term, weights)
def kappa_loss(predictions, labels, y_pow=1, eps=1e-15, num_ratings=5, batch_size=32, name='kappa'):
  """Define a kappa loss, a continuous differentiable approximation of the
  discrete (quadratic-weighted) kappa loss.

  Args:
    predictions: 2D tensor or array, [batch_size, num_classes] predictions of the network.
    labels: 2D tensor or array, [batch_size, num_classes] ground truth labels.
    y_pow: int, power to which predictions are raised; useful if model diverges, e.g. y_pow=2.
    num_ratings: number of ratings, typically num_classes of the model.
    batch_size: batch size of the training or validation ops.
    eps: a float that prevents division by zero.
    name: Optional scope/name for op_scope.

  Returns:
    A tensor with the kappa loss.
  """
  with tf.name_scope(name):
    labels = tf.to_float(labels)
    # Pairwise disagreement weights w_ij = (i - j)^2 / (num_ratings - 1)^2.
    repeat_op = tf.to_float(
        tf.tile(tf.reshape(tf.range(0, num_ratings), [num_ratings, 1]), [1, num_ratings]))
    repeat_op_sq = tf.square((repeat_op - tf.transpose(repeat_op)))
    weights = repeat_op_sq / tf.to_float((num_ratings - 1)**2)
    pred_ = predictions**y_pow
    try:
      # Row-normalize the (powered) predictions; -1 lets TF infer batch dim.
      pred_norm = pred_ / \
          (eps + tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1]))
    except Exception:
      # Fallback when the dynamic batch dimension cannot be used: rely on
      # the statically supplied batch_size.
      pred_norm = pred_ / \
          (eps + tf.reshape(tf.reduce_sum(pred_, 1), [batch_size, 1]))
    # Rating histograms of predictions (a) and labels (b), plus confusion matrix.
    hist_rater_a = tf.reduce_sum(pred_norm, 0)
    hist_rater_b = tf.reduce_sum(labels, 0)
    conf_mat = tf.matmul(tf.transpose(pred_norm), labels)
    # Kappa = 1 - observed/expected weighted disagreement; negated for minimization.
    nom = tf.reduce_sum(weights * conf_mat)
    denom = tf.reduce_sum(weights * tf.matmul(
        tf.reshape(hist_rater_a, [num_ratings, 1]), tf.reshape(hist_rater_b, [1, num_ratings])) /
                          tf.to_float(batch_size))
    try:
      return -(1 - nom / denom)
    except Exception:
      # Guard against a zero denominator.
      return -(1 - nom / (denom + eps))
def kappa_log_loss(predictions,
                   labels,
                   label_smoothing=0.0,
                   y_pow=1,
                   batch_size=32,
                   log_scale=0.5,
                   num_classes=5,
                   log_offset=0.50,
                   name='kappa_log'):
  """Joint kappa + log loss.

  Args:
    predictions: 2D tensor/array [batch_size, num_classes], network outputs.
    labels: 2D tensor/array [batch_size, num_classes], ground-truth targets.
    label_smoothing: if > 0, smooth the labels for better generalization.
    y_pow: int, power to which predictions are raised inside the kappa term.
    batch_size: batch size of the training or validation ops.
    log_scale: factor multiplying the offset log loss, e.g. 0.5.
    num_classes: kept for interface compatibility; NOTE: it is immediately
      overwritten from the last dimension of *labels*.
    log_offset: minimum log-loss offset subtracted from the log loss, e.g. 0.50.
    name: optional name scope.

  Returns:
    A tensor with the kappa log loss.
  """
  with tf.name_scope(name):
    num_classes = labels.get_shape()[-1].value
    labels = tf.cast(labels, predictions.dtype)
    if label_smoothing > 0:
      on_value = 1.0 - label_smoothing
      off_value = label_smoothing / num_classes
      labels = labels * on_value + off_value
    # NOTE(review): log_loss here is tf.losses.log_loss, whose signature is
    # (labels, predictions) — the argument order below looks swapped; confirm
    # before changing, since trained models may depend on it.
    log_part = log_loss(predictions, labels)
    kappa_part = kappa_loss(
        predictions, labels, y_pow=y_pow, num_ratings=num_classes, batch_size=batch_size)
    return kappa_part + log_scale * (log_part - log_offset)
def kappa_log_loss_clipped(predictions,
                           labels,
                           label_smoothing=0.0,
                           y_pow=1,
                           batch_size=32,
                           log_scale=0.5,
                           log_cutoff=0.80,
                           num_classes=5,
                           name='kappa_log_clipped'):
  """Joint kappa + log loss where the log-loss term is clipped from below.

  Args:
    predictions: 2D tensor/array [batch_size, num_classes], network outputs.
    labels: 2D tensor/array [batch_size, num_classes], ground-truth targets.
    label_smoothing: if > 0, smooth the labels for better generalization.
    y_pow: int, power to which predictions are raised inside the kappa term.
    batch_size: batch size of the training or validation ops.
    log_scale: factor multiplying the clipped log loss, e.g. 0.5.
    log_cutoff: minimum log-loss value, e.g. 0.50.
    num_classes: kept for interface compatibility; NOTE: it is immediately
      overwritten from the last dimension of *labels*.
    name: optional name scope.

  Returns:
    A tensor with the clipped kappa log loss.
  """
  with tf.name_scope(name):
    num_classes = labels.get_shape()[-1].value
    labels = tf.cast(labels, predictions.dtype)
    if label_smoothing > 0:
      on_value = 1.0 - label_smoothing
      off_value = label_smoothing / num_classes
      labels = labels * on_value + off_value
    log_part = log_loss_tf(predictions, labels)
    kappa_part = kappa_loss(
        predictions, labels, y_pow=y_pow, num_ratings=num_classes, batch_size=batch_size)
    return kappa_part + log_scale * tf.clip_by_value(log_part, log_cutoff, 10**3)
def cross_entropy_loss(logits, labels, label_smoothing=0.0, weight=1.0, name='cross_entropy_loss'):
  """Softmax cross-entropy loss with optional label smoothing.

  Args:
    logits: 2D tensor/array [batch_size, num_classes], unnormalized scores.
    labels: 2D tensor/array [batch_size, num_classes], ground-truth targets.
    label_smoothing: if > 0, smooth the labels for better generalization.
    weight: scalar factor scaling the loss.
    name: optional name scope.

  Returns:
    A scalar tensor with the weighted cross-entropy loss.
  """
  logits.get_shape().assert_is_compatible_with(labels.get_shape())
  with tf.name_scope(name):
    num_classes = labels.get_shape()[-1].value
    labels = tf.cast(labels, logits.dtype)
    if label_smoothing > 0:
      labels = labels * (1.0 - label_smoothing) + label_smoothing / num_classes
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='xentropy')
    weight_t = tf.convert_to_tensor(weight, dtype=logits.dtype.base_dtype, name='loss_weight')
    return tf.multiply(weight_t, tf.reduce_mean(xent), name='value')
def l1_l2_regularizer(var, weight_l1=1.0, weight_l2=1.0, name='l1_l2_regularizer'):
  """Combined L1 + L2 regularization term for *var* (e.g. weight decay).

  Args:
    var: tensor to regularize.
    weight_l1: optional weight modulating the L1 part.
    weight_l2: optional weight modulating the L2 part.
    name: optional name scope.

  Returns:
    The L1 + L2 loss op.
  """
  with tf.name_scope(name):
    w1 = tf.convert_to_tensor(weight_l1, dtype=var.dtype.base_dtype, name='weight_l1')
    w2 = tf.convert_to_tensor(weight_l2, dtype=var.dtype.base_dtype, name='weight_l2')
    l1_term = tf.multiply(w1, tf.reduce_sum(tf.abs(var)), name='value_l1')
    l2_term = tf.multiply(w2, tf.nn.l2_loss(var), name='value_l2')
    return tf.add(l1_term, l2_term, name='value')
def l1_regularizer(scale, name='l1_regularizer'):
  """Returns a function applying L1 regularization (encourages sparsity).

  Args:
    scale: scalar multiplier; 0.0 disables the regularizer.
    name: optional name/scope name.

  Returns:
    A function with signature `l1(weights)` applying L1 regularization.

  Raises:
    ValueError: if scale is negative or an integer.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % scale)
  if isinstance(scale, numbers.Real):
    if scale < 0.:
      raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
    if scale == 0.:
      # Disabled: regularizer contributes nothing.
      return lambda _: None

  def l1(weights, name='l1_regularizer'):
    """Applies L1 regularization to weights."""
    with tf.name_scope(name):
      scale_t = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
      return tf.multiply(scale_t, tf.reduce_sum(tf.abs(weights)), name=name)

  return l1
def l2_regularizer(scale, name='l2_regularizer'):
  """Returns a function applying L2 regularization (helps prevent overfitting).

  Args:
    scale: scalar multiplier; 0.0 disables the regularizer.
    name: optional name/scope name.

  Returns:
    A function with signature `l2(weights)` applying L2 regularization.

  Raises:
    ValueError: if scale is negative or an integer.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % (scale,))
  if isinstance(scale, numbers.Real):
    if scale < 0.:
      raise ValueError('Setting a scale less than 0 on a regularizer: %g.' % scale)
    if scale == 0.:
      # Disabled: regularizer contributes nothing.
      return lambda _: None

  def l2(weights, name='l2_regularizer'):
    """Applies l2 regularization to weights."""
    with tf.name_scope(name):
      my_scale = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
      # BUG FIX: the original called `nn.l2_loss`, but this module only
      # imports `tensorflow as tf`, so applying the regularizer raised
      # NameError.  Use the fully qualified `tf.nn.l2_loss`.
      return tf.multiply(my_scale, tf.nn.l2_loss(weights), name=name)

  return l2
def discretized_mix_logistic_loss(inputs,
                                  predictions,
                                  sum_all=True,
                                  name='disretized_mix_logistic_loss'):
  """log-likelihood for mixture of discretized logistics, assumes the data has
  been rescaled to.

  [-1,1] interval

  Args:
    predictions: 4D tensor or array, [batch_size, width, height, out_channels]
      predictions of the network. out_channels must be 10 * nr_mix: per
      mixture component, a mixture logit plus (mean, log-scale, coeff) for
      each of 3 channels.
    inputs: 4D tensor or array, [batch_size, width, height, num_classes]
      ground truth labels or target labels.
    sum_all: if True, return a scalar summed over the whole batch; otherwise
      return a per-example loss of shape [batch_size].
    name: Optional scope/name for op_scope.
  Returns:
    A tensor with the discretized mix logistic loss.
  """
  with tf.name_scope(name):
    inputs_shape = list(map(int, inputs.get_shape()))
    predictions_shape = list(map(int, predictions.get_shape()))
    # Each mixture component uses 10 channels: 1 logit + 3 * (mean, scale, coeff).
    nr_mix = int(predictions_shape[-1] / 10)
    logit_probs = predictions[:, :, :, :nr_mix]
    predictions = tf.reshape(predictions[:, :, :, nr_mix:], inputs_shape + [nr_mix * 3])
    means = predictions[:, :, :, :, :nr_mix]
    # Clamp log-scales from below for numerical stability.
    log_scales = tf.maximum(predictions[:, :, :, :, nr_mix:2 * nr_mix], -7.)
    coeffs = tf.nn.tanh(predictions[:, :, :, :, 2 * nr_mix:3 * nr_mix])
    # Broadcast inputs across the mixture dimension.
    inputs = tf.reshape(inputs, inputs_shape + [1]) + tf.zeros(inputs_shape + [nr_mix])
    # Condition channel means autoregressively on previous channels
    # (assumes 3 channels, e.g. RGB — NOTE(review): confirm inputs have 3 channels).
    m2 = tf.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * inputs[:, :, :, 0, :],
                    [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])
    m3 = tf.reshape(
        means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * inputs[:, :, :, 0, :] +
        coeffs[:, :, :, 2, :] * inputs[:, :, :, 1, :],
        [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])
    means = tf.concat([
        tf.reshape(means[:, :, :, 0, :],
                   [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]), m2, m3
    ],
                      axis=3)
    centered_inputs = inputs - means
    inv_stdv = tf.exp(-log_scales)
    # CDF of the logistic evaluated at the edges of the discretization bin
    # (bin half-width 1/255 for data rescaled to [-1, 1] from 8-bit values).
    plus_in = inv_stdv * (centered_inputs + 1. / 255.)
    cdf_plus = tf.nn.sigmoid(plus_in)
    min_in = inv_stdv * (centered_inputs - 1. / 255.)
    cdf_min = tf.nn.sigmoid(min_in)
    # log-CDF at the extreme edges (0 and 255 cases).
    log_cdf_plus = plus_in - tf.nn.softplus(plus_in)
    log_one_minus_cdf_min = -tf.nn.softplus(min_in)
    cdf_delta = cdf_plus - cdf_min
    mid_in = inv_stdv * centered_inputs
    # log-PDF at the bin center, used when cdf_delta underflows.
    log_pdf_mid = mid_in - log_scales - 2. * tf.nn.softplus(mid_in)
    # NOTE(review): tf.select is the pre-TF-1.0 name of tf.where — this file
    # appears to target an old TF release; confirm before upgrading.
    log_probs = tf.select(
        inputs < -0.999, log_cdf_plus,
        tf.select(
            inputs > 0.999, log_one_minus_cdf_min,
            tf.select(cdf_delta > 1e-5, tf.log(tf.maximum(cdf_delta, 1e-12)),
                      log_pdf_mid - np.log(127.5))))
    # Sum over channels and add mixture log-weights.
    log_probs = tf.reduce_sum(log_probs, 3) + \
        log_prob_from_logits(logit_probs)
    if sum_all:
      return -tf.reduce_sum(log_sum_exp(log_probs))
    else:
      return -tf.reduce_sum(log_sum_exp(log_probs), [1, 2])
def mse_loss(pred, labels):
  """Root-of-2*L2 loss normalized by batch size: sqrt(2 * l2_loss(pred - labels)) / batch_size.

  Args:
    pred: predictions tensor; first dimension is the batch size.
    labels: ground-truth tensor, same shape as `pred`.
  Returns:
    a scalar loss tensor.
  """
  try:
    batch_size = tf.cast(pred.shape[0], tf.float32)
  except Exception as e:
    # Static shape may be unknown; fall back to the dynamic shape.
    # BUGFIX: `e.message` does not exist in Python 3 and raised an
    # AttributeError inside the handler — use str(e) instead.
    print('Pred is a tf tensor %s' % str(e))
    batch_size = tf.cast(tf.shape(pred)[0], tf.float32)
  loss_val = tf.sqrt(2 * tf.nn.l2_loss(pred - labels)) / batch_size
  return loss_val
def pullaway_loss(embeddings, name='pullaway_loss'):
  """Pull-away term: penalizes pairwise cosine similarity between embeddings.

  Encourages the rows of `embeddings` to be mutually orthogonal (varied).

  Args:
    embeddings: The embeddings to be orthogonalized for varied faces.
      Shape [batch_size, embeddings_dim]
  Return: pull away term loss
  """
  with tf.name_scope(name):
    # Row-normalize so the inner products below are cosine similarities.
    row_norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    unit_rows = embeddings / row_norm
    cosine_sim = tf.matmul(unit_rows, unit_rows, transpose_b=True)
    n = tf.cast(tf.shape(embeddings)[0], tf.float32)
    # Subtract the diagonal (self-similarity) and average over the n*(n-1) pairs.
    return (tf.reduce_sum(cosine_sim) - n) / (n * (n - 1))
def log_sum_exp(x):
  """Numerically stable log(sum(exp(x))) over the last axis."""
  last_axis = len(x.get_shape()) - 1
  # Subtract the max before exponentiating to avoid overflow.
  x_max = tf.reduce_max(x, last_axis)
  x_max_keep = tf.reduce_max(x, last_axis, keep_dims=True)
  return x_max + tf.log(tf.reduce_sum(tf.exp(x - x_max_keep), last_axis))
def log_prob_from_logits(x):
  """Numerically stable log-softmax over the last axis."""
  last_axis = len(x.get_shape()) - 1
  # Shift by the max so exp() cannot overflow.
  x_max = tf.reduce_max(x, last_axis, keep_dims=True)
  shifted = x - x_max
  return shifted - tf.log(tf.reduce_sum(tf.exp(shifted), last_axis, keep_dims=True))
def segment_loss(logits, labels, num_classes, head=None):
  """Calculate the loss from the logits and the labels.

  Args:
    logits: tensor, float - [batch_size * width * height, num_classes].
      Use vgg_fcn.up as logits.
    labels: Labels tensor, int32 - [batch_size * width * height, num_classes].
      The ground truth of your data.
    num_classes: number of classes (kept for interface compatibility; the
      class dimension is taken from the tensors themselves).
    head: numpy array - [num_classes]
      Weighting the loss of each class
      Optional: Prioritize some classes
  Returns:
    loss: Loss tensor of type float.
  """
  with tf.name_scope('segment_loss'):
    # epsilon keeps tf.log well-defined when softmax underflows to 0.
    epsilon = tf.constant(value=1e-7)
    labels = tf.to_float(labels)
    softmax = tf.nn.softmax(logits) + epsilon
    if head is not None:
      # BUGFIX: tf.mul was removed in TF 1.0; the rest of this file uses
      # tf.multiply, so use it here too for consistency.
      cross_entropy = -tf.reduce_sum(tf.multiply(labels * tf.log(softmax), head), axis=[1])
    else:
      cross_entropy = -tf.reduce_sum(labels * tf.log(softmax), axis=[1])
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return cross_entropy_mean
def triplet_loss(anchor, positive, negative, alpha=0.2, name='triplet_loss'):
  """FaceNet-style triplet loss.

  Penalizes triplets where the anchor-positive distance is not at least
  `alpha` smaller than the anchor-negative distance.

  Args:
    anchor: 2-D `tensor` [batch_size, embedding_size], the embeddings for the anchor images.
    positive: 2-D `tensor` [batch_size, embedding_size], the embeddings for the positive images.
    negative: 2-D `tensor` [batch_size, embedding_size], the embeddings for the negative images.
    alpha: positive to negative triplet distance margin
  Returns:
    the triplet loss.
  """
  with tf.name_scope(name):
    # Squared Euclidean distances per example.
    dist_to_pos = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
    dist_to_neg = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
    margin_term = tf.add(tf.subtract(dist_to_pos, dist_to_neg), alpha)
    # Hinge at zero, then average over the batch.
    return tf.reduce_mean(tf.maximum(margin_term, 0.0), 0)
def decov_loss(xs, name='decov_loss'):
  """Decov loss as described in https://arxiv.org/pdf/1511.06068.pdf 'Reducing
  Overfitting In Deep Networks by Decorrelating Representation'.

  Penalizes the off-diagonal entries of the activation covariance matrix.

  Args:
    xs: 4-D `tensor` [batch_size, height, width, channels], input
  Returns:
    a `float` decov loss
  """
  with tf.name_scope(name):
    # Flatten each example to a vector.
    flat = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
    mean = tf.reduce_mean(flat, 0, True)
    centered = tf.expand_dims(flat - mean, 2)
    # Batch-averaged covariance of the centered activations.
    cov = tf.reduce_mean(tf.matmul(centered, tf.transpose(centered, perm=[0, 2, 1])), 0)
    frob_sqr = tf.reduce_sum(tf.square(cov))
    diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(cov)))
    # Frobenius norm minus the diagonal leaves only off-diagonal covariance.
    return 0.5 * (frob_sqr - diag_sqr)
def center_loss(features, label, alpha, num_classes, name='center_loss'):
  """Center loss based on the paper "A Discriminative Feature Learning Approach
  for Deep Face Recognition" (http://ydwen.github.io/papers/WenECCV16.pdf)

  Maintains a non-trainable per-class center variable and pulls each
  feature toward the center of its class.

  Args:
    features: 2-D `tensor` [batch_size, feature_length], input features
    label: 1-D `tensor` [batch_size], input label
    alpha: center loss parameter (update rate of the centers; see paper)
    num_classes: a `int` numof classes for training
  Returns:
    a `float`, center loss
  """
  with tf.variable_scope(name):
    num_features = features.get_shape()[1]
    # Class centers live in a non-trainable variable; they are updated by
    # the scatter_sub op below rather than by the optimizer.
    centers = tf.get_variable(
        'centers', [num_classes, num_features],
        dtype=tf.float32,
        initializer=tf.constant_initializer(0),
        trainable=False)
    label = tf.reshape(label, [-1])
    # Look up the center for each example's class.
    centers_batch = tf.gather(centers, label)
    diff = (1 - alpha) * (centers_batch - features)
    # In-place update of the centers. NOTE(review): the returned `centers`
    # op must be run (e.g. via a control dependency) for the update to take
    # effect; the loss itself is computed from the pre-update centers_batch.
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.nn.l2_loss(features - centers_batch)
    return loss, centers
def correlation_loss(source_samples, target_samples, weight, name='corr_loss'):
  """Adds a similarity loss term, the correlation between two representations.

  Args:
    source_samples: a tensor of shape [num_samples, num_features]
    target_samples: a tensor of shape [num_samples, num_features]
    weight: a scalar weight for the loss.
    name: optional name scope for the ops.
  Returns:
    a scalar tensor representing the correlation loss value.
  """
  with tf.name_scope(name):
    # Center and L2-normalize both representations feature-wise.
    source_samples -= tf.reduce_mean(source_samples, 0)
    target_samples -= tf.reduce_mean(target_samples, 0)
    source_samples = tf.nn.l2_normalize(source_samples, 1)
    target_samples = tf.nn.l2_normalize(target_samples, 1)
    source_cov = tf.matmul(tf.transpose(source_samples), source_samples)
    target_cov = tf.matmul(tf.transpose(target_samples), target_samples)
    corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight
    # BUGFIX: the finiteness assert used to hang off an unused tf.no_op and
    # therefore never executed; gate the returned tensor on it instead.
    assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss])
    with tf.control_dependencies([assert_op]):
      corr_loss = tf.identity(corr_loss)
    return corr_loss
def maximum_mean_discrepancy(x,
                             y,
                             kernel=util.gaussian_kernel_matrix,
                             name='maximum_mean_discrepancy'):
  r"""Computes the Maximum Mean Discrepancy (MMD) of two samples: x and y.

  Maximum Mean Discrepancy (MMD) is a distance-measure between the samples of
  the distributions of x and y. Here we use the kernel two sample estimate
  using the empirical mean of the two distributions.
  MMD^2(P, Q) = || \E{\phi(x)} - \E{\phi(y)} ||^2
  = \E{ K(x, x) } + \E{ K(y, y) } - 2 \E{ K(x, y) },
  where K = <\phi(x), \phi(y)>,
  is the desired kernel function, in this case a radial basis kernel.

  Args:
    x: a tensor of shape [num_samples, num_features]
    y: a tensor of shape [num_samples, num_features]
    kernel: a function which computes the kernel in MMD. Defaults to the
      GaussianKernelMatrix.
  Returns:
    a scalar denoting the squared maximum mean discrepancy loss.
  """
  with tf.name_scope(name):
    # \E{ K(x, x) } + \E{ K(y, y) } - 2 \E{ K(x, y) }
    cost = tf.reduce_mean(kernel(x, x))
    cost += tf.reduce_mean(kernel(y, y))
    cost -= 2 * tf.reduce_mean(kernel(x, y))
    # We do not allow the loss to become negative (the unbiased estimate can
    # dip below zero due to sampling noise).
    # NOTE(review): tf.where with scalar int 0 as the `y` branch relies on
    # implicit conversion — confirm dtypes match on the target TF version.
    cost = tf.where(cost > 0, cost, 0, name='value')
  return cost
def mmd_loss(source_samples, target_samples, weight, name='mmd_loss'):
  """Adds a similarity loss term, the MMD between two representations.

  This Maximum Mean Discrepancy (MMD) loss is calculated with a number of
  different Gaussian kernels.

  Args:
    source_samples: a tensor of shape [num_samples, num_features].
    target_samples: a tensor of shape [num_samples, num_features].
    weight: the weight of the MMD loss.
    name: optional name scope for the ops.
  Returns:
    a scalar tensor representing the MMD loss value.
  """
  with tf.name_scope(name):
    # A wide range of bandwidths makes the estimate robust to scale.
    sigmas = [
        1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5, 1e6
    ]
    gaussian_kernel = partial(util.gaussian_kernel_matrix, sigmas=tf.constant(sigmas))
    loss_value = maximum_mean_discrepancy(source_samples, target_samples, kernel=gaussian_kernel)
    # Floor at 1e-4 to keep gradients alive, then apply the weight.
    loss_value = tf.maximum(1e-4, loss_value) * weight
    # BUGFIX: the finiteness assert used to hang off an unused tf.no_op and
    # therefore never executed; gate the returned tensor on it instead.
    assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value])
    with tf.control_dependencies([assert_op]):
      loss_value = tf.identity(loss_value)
    return loss_value
def dann_loss(source_samples, target_samples, weight, name='dann_loss'):
  """Adds the domain adversarial (DANN) loss.

  A small domain classifier (two fc layers) tries to tell source from
  target samples; the gradient-reversal layer makes the feature extractor
  adversarially confuse it.

  Args:
    source_samples: a tensor of shape [num_samples, num_features].
    target_samples: a tensor of shape [num_samples, num_features].
    weight: the weight of the loss.
    name: optional name scope for the ops.
  Returns:
    a scalar tensor representing the correlation loss value.
  """
  with tf.variable_scope(name):
    batch_size = tf.shape(source_samples)[0]
    samples = tf.concat(values=[source_samples, target_samples], axis=0)
    samples = flatten(samples)
    # Domain labels: 0 for source rows, 1 for target rows.
    domain_selection_mask = tf.concat(
        values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))], axis=0)
    # Gradient reversal: forward pass is identity, backward pass negates.
    grl = gradient_reverse(samples)
    grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))
    grl = fc(grl, 100, True, None, activation=relu, name='fc1')
    logits = fc(grl, 1, True, None, activation=None, name='fc2')
    domain_predictions = tf.sigmoid(logits)
    domain_loss = tf.losses.log_loss(domain_selection_mask, domain_predictions, weights=weight)
    domain_accuracy = util.accuracy_tf(domain_selection_mask, tf.round(domain_predictions))
    # BUGFIX: the finiteness assert used to hang off an unused tf.no_op and
    # therefore never executed; gate the returned tensor on it instead.
    assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
    with tf.control_dependencies([assert_op]):
      domain_loss = tf.identity(domain_loss)
    return domain_loss
def difference_loss(private_samples, shared_samples, weight=1.0, name='difference_loss'):
  """Adds the difference loss between the private and shared representations.

  Penalizes correlation between the private and shared feature spaces,
  encouraging them to encode different information.

  Args:
    private_samples: a tensor of shape [num_samples, num_features].
    shared_samples: a tensor of shape [num_samples, num_features].
    weight: the weight of the incoherence loss.
    name: the name of the tf summary.
  """
  with tf.name_scope(name):
    # Center and L2-normalize both representations.
    private_samples -= tf.reduce_mean(private_samples, 0)
    shared_samples -= tf.reduce_mean(shared_samples, 0)
    private_samples = tf.nn.l2_normalize(private_samples, 1)
    shared_samples = tf.nn.l2_normalize(shared_samples, 1)
    correlation_matrix = tf.matmul(private_samples, shared_samples, transpose_a=True)
    cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight
    cost = tf.where(cost > 0, cost, 0, name='value')
    # BUGFIX: the finiteness assert used to hang off an unused tf.no_op and
    # therefore never executed; gate the returned tensor on it instead.
    assert_op = tf.Assert(tf.is_finite(cost), [cost])
    with tf.control_dependencies([assert_op]):
      cost = tf.identity(cost)
    return cost
def log_quaternion_loss_batch(predictions, labels, name='log_quaternion_batch_loss'):
  """Per-example log error between batches of unit quaternions.

  Args:
    predictions: A Tensor of size [batch_size, 4].
    labels: A Tensor of size [batch_size, 4].
  Returns:
    A Tensor of size [batch_size], denoting the error between the quaternions.
  """
  # Both inputs must be (approximately) unit quaternions.
  unit_checks = [
      tf.Assert(
          tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), 1e-4)),
          ['The l2 norm of each prediction quaternion vector should be 1.']),
      tf.Assert(
          tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)),
          ['The l2 norm of each label quaternion vector should be 1.']),
  ]
  with tf.name_scope(name):
    with tf.control_dependencies(unit_checks):
      # |<p, q>| is 1 for identical orientations, so the log term goes to
      # log(1e-4) as the prediction approaches the label.
      dots = tf.reduce_sum(tf.multiply(predictions, labels), [1])
      logcost = tf.log(1e-4 + 1 - tf.abs(dots))
  return logcost
def log_quaternion_loss(predictions, labels, batch_size, name='log_quaternion_loss'):
  """Mean log error between batches of unit quaternions.

  The caller is expected to add the loss to the graph.

  Args:
    predictions: A Tensor of size [batch_size, 4].
    labels: A Tensor of size [batch_size, 4].
    batch_size: number of examples, used to average the per-example costs.
  Returns:
    A Tensor of size 1, denoting the mean error between batches of quaternions.
  """
  with tf.name_scope(name):
    per_example = log_quaternion_loss_batch(predictions, labels)
    total = tf.reduce_sum(per_example, [0])
    # Divide by batch_size to obtain the mean.
    return tf.multiply(total, 1.0 / batch_size, name='log_quaternion_loss')
def random_perturbation_loss(embedded, length, loss_fn, perturb_norm_length=0.1):
  """Recomputes the classification loss on randomly perturbed embeddings.

  Args:
    embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim]
    length: a `int`, length of the mask
    loss_fn: a callable, that returns loss
    perturb_norm_length: a `float`, norm length of the random perturbation
  Returns:
    perturbation loss
  """
  rand = tf.random_normal(shape=tf.shape(embedded))
  # Mask out padding positions, then rescale the noise to a fixed L2 norm.
  delta = _scale_l2(_mask_by_length(rand, length), perturb_norm_length)
  return loss_fn(embedded + delta)
def adversarial_loss(embedded, loss, loss_fn, perturb_norm_length=0.1):
  """Recomputes the classification loss on adversarially perturbed embeddings.

  The perturbation follows the gradient of the loss w.r.t. the embeddings
  (fast gradient method), scaled to a fixed L2 norm.

  Args:
    embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim]
    loss: `float`, loss
    loss_fn: a callable, that returns loss
    perturb_norm_length: a `float`, norm length of the adversarial perturbation
  Returns:
    adversarial loss
  """
  gradient, = tf.gradients(
      loss, embedded, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
  # Treat the gradient as a constant so the perturbation itself is not trained.
  delta = _scale_l2(tf.stop_gradient(gradient), perturb_norm_length)
  return loss_fn(embedded + delta)
def virtual_adversarial_loss(logits,
                             embedded,
                             labels,
                             length,
                             logits_from_embedding_fn,
                             num_classes,
                             num_power_iteration=1,
                             small_constant_for_finite_diff=1e-3,
                             perturb_norm_length=0.1,
                             vocab_size=None):
  """Virtual adversarial loss. Computes virtual adversarial perturbation by
  finite difference method and power iteration, adds it to the embedding, and
  computes the KL divergence between the new logits and the original logits.

  Args:
    logits: 2-D float `Tensor`, [num_timesteps*batch_size, m], where m=1 if
      num_classes=2, otherwise m=num_classes.
    embedded: 3-D float `Tensor`, [batch_size, num_timesteps, embedding_dim].
    labels: 1-D `Tensor`, input labels
    length: a `int`, input length
    logits_from_embedding_fn: callable that takes embeddings and returns
      classifier logits.
    num_classes: num_classes for training
    num_power_iteration: a `int`, the number of power iteration
    small_constant_for_finite_diff: a `float`, Small constant for finite difference method
    perturb_norm_length: a `float`, Norm length of adversarial perturbation
      to be optimized with validatio
    vocab_size: a `int`, vocabulary size of the problem; used to build the
      end-of-sequence weight mask. Required (keyword, defaulted to None only
      for backward signature compatibility).
  Returns:
    a `float` `scalar`, KL divergence.
  """
  # BUGFIX: the original body referenced `vocab_size` without it being a
  # parameter, raising NameError on every call. It is now an explicit
  # (trailing, defaulted) parameter, validated here.
  if vocab_size is None:
    raise ValueError('vocab_size must be provided to compute the EOS mask.')
  logits = tf.stop_gradient(logits)
  weights = _end_of_seq_mask(labels, vocab_size)
  # Power iteration: refine a random direction toward the direction that
  # maximally changes the model's output distribution.
  d = _mask_by_length(tf.random_normal(shape=tf.shape(embedded)), length)
  for _ in range(num_power_iteration):
    d = _scale_l2(d, small_constant_for_finite_diff)
    d_logits = logits_from_embedding_fn(embedded + d)
    kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes)
    d, = tf.gradients(kl, d, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
    d = tf.stop_gradient(d)
  perturb = _scale_l2(_mask_by_length(d, length), perturb_norm_length)
  vadv_logits = logits_from_embedding_fn(embedded + perturb)
  return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)
def random_perturbation_loss_brnn(embedded, length, loss_fn, perturb_norm_length=0.1):
  """Recomputes the classification loss on randomly perturbed embeddings for
  bidirectional rnn models.

  Args:
    embedded: list of 3-D float `Tensor`s, [batch_size, num_timesteps, embedding_dim]
    length: a `int`, length of the mask
    loss_fn: a callable, that returns loss
    perturb_norm_length: a `float`, norm length of the random perturbation
  Returns:
    perturbation loss
  """
  perturbed = []
  for emb in embedded:
    rand = tf.random_normal(shape=tf.shape(emb))
    # Mask padding, fix the noise norm, and add it to this direction's embeddings.
    perturbed.append(emb + _scale_l2(_mask_by_length(rand, length), perturb_norm_length))
  return loss_fn(perturbed)
def adversarial_loss_brnn(embedded, loss, loss_fn, perturb_norm_length=0.1):
  """Recomputes the classification loss on adversarially perturbed embeddings
  for bidirectional rnn models.

  Args:
    embedded: list of 3-D float `Tensor`s, [batch_size, num_timesteps, embedding_dim]
    loss: `float`, loss
    loss_fn: a callable, that returns loss
    perturb_norm_length: a `float`, Norm length of adversarial perturbation
      to be optimized with validatio
  Returns:
    adversarial loss
  """
  # BUGFIX: the parameter was misspelled `perurb_norm_length` while the body
  # used `perturb_norm_length`, so every call raised NameError. No working
  # caller could exist, so correcting the spelling is safe.
  grads = tf.gradients(
      loss, embedded, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
  adv_exs = [
      emb + _scale_l2(tf.stop_gradient(g), perturb_norm_length) for emb, g in zip(embedded, grads)
  ]
  return loss_fn(adv_exs)
def virtual_adversarial_loss_brnn(logits,
                                  embedded,
                                  labels,
                                  length,
                                  logits_from_embedding_fn,
                                  vocab_size,
                                  num_classes,
                                  num_power_iteration=1,
                                  small_constant_for_finite_diff=1e-3,
                                  perturb_norm_length=0.1):
  """Virtual adversarial loss for bidirectional models Computes virtual
  adversarial perturbation by finite difference method and power iteration,
  adds it to the embedding, and computes the KL divergence between the new
  logits and the original logits.

  Args:
    logits: 2-D float `Tensor`, [num_timesteps*batch_size, m], where m=1 if
      num_classes=2, otherwise m=num_classes.
    embedded: list of 3-D float `Tensor`s (forward and reverse directions),
      each [batch_size, num_timesteps, embedding_dim].
    labels: 1-D `Tensor`, input labels
    length: a `int`, input length
    logits_from_embedding_fn: callable that takes embeddings and returns
      classifier logits.
    num_classes: num_classes for training
    vocab_size: a `int`, vocabular size of the problem
    num_power_iteration: a `int`, the number of power iteration
    small_constant_for_finite_diff: a `float`, Small constant for finite difference method
    perturb_norm_length: a `float`, Norm length of adversarial perturbation
      to be optimized with validatio
  Returns:
    a `float` `scalar`, KL divergence.
  """
  # Treat the clean logits as constants: only the perturbation is optimized.
  logits = tf.stop_gradient(logits)
  # Weight the KL term at end-of-sequence positions only.
  weights = _end_of_seq_mask(labels, vocab_size)
  # Start power iteration from masked random noise, one tensor per direction.
  perturbs = [_mask_by_length(tf.random_normal(shape=tf.shape(emb)), length) for emb in embedded]
  for _ in range(num_power_iteration):
    # Finite-difference step: probe the model with a small perturbation…
    perturbs = [_scale_l2(d, small_constant_for_finite_diff) for d in perturbs]
    d_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)])
    kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes)
    # …then move each perturbation toward the direction of maximal KL change.
    perturbs = tf.gradients(
        kl, perturbs, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
    perturbs = [tf.stop_gradient(d) for d in perturbs]
  # Rescale the final direction to the target norm and evaluate the KL term.
  perturbs = [_scale_l2(_mask_by_length(d, length), perturb_norm_length) for d in perturbs]
  vadv_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)])
  return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)
def _mask_by_length(t, length):
  """Zeroes positions of `t` beyond each sequence's length (axis 1)."""
  max_steps = t.get_shape().as_list()[1]
  keep = tf.sequence_mask(length, maxlen=max_steps)
  # Broadcast the [batch, time] mask over the embedding dimension.
  keep = tf.expand_dims(tf.cast(keep, tf.float32), -1)
  return t * keep
def _scale_l2(x, norm_length):
  """Rescales `x` so its L2 norm over axes (1, 2) equals `norm_length`."""
  # Divide by the per-example max first so the squared sum cannot overflow.
  alpha = tf.reduce_max(tf.abs(x), (1, 2), keep_dims=True) + 1e-12
  l2 = alpha * tf.sqrt(tf.reduce_sum(tf.pow(x / alpha, 2), (1, 2), keep_dims=True) + 1e-6)
  return norm_length * (x / l2)
def _end_of_seq_mask(tokens, vocab_size):
  """Generate a mask for the EOS token (1.0 on EOS, 0.0 otherwise).

  Args:
    tokens: 1-D integer `Tensor` [num_timesteps*batch_size]. Each element is an
      id from the vocab.
    vocab_size: a `int`, vocabular size of the problem
  Returns:
    Float 1-D `Tensor` same shape as tokens, whose values are 1.0 on the end of
    sequence and 0.0 on the others.
  """
  # By convention the EOS token is the last id in the vocabulary.
  is_eos = tf.equal(tokens, vocab_size - 1)
  return tf.cast(is_eos, tf.float32)
def _kl_divergence_with_logits(q_logits, p_logits, weights, num_classes):
  """Returns weighted KL divergence between distributions q and p.

  Args:
    q_logits: logits for 1st argument of KL divergence shape
      [num_timesteps * batch_size, num_classes] if num_classes > 2, and
      [num_timesteps * batch_size] if num_classes == 2.
    p_logits: logits for 2nd argument of KL divergence with same shape q_logits.
    weights: 1-D `float` tensor with shape [num_timesteps * batch_size].
      Elements should be 1.0 only on end of sequences
    num_classes: a `int`, number of training classes
  Returns:
    a `float` `scalar`, KL divergence.
  """
  if num_classes == 2:
    q = tf.nn.sigmoid(q_logits)
    p = tf.nn.sigmoid(p_logits)
    # KL(q||p) for Bernoulli distributions expressed via cross-entropies:
    # -H(q) + H(q, p).
    # BUGFIX: second term was `f.nn.sigmoid_cross_entropy_with_logits`
    # (missing `t`), which raised NameError in the binary branch.
    kl = (-tf.nn.sigmoid_cross_entropy_with_logits(logits=q_logits, labels=q) +
          tf.nn.sigmoid_cross_entropy_with_logits(logits=p_logits, labels=q))
  else:
    q = tf.nn.softmax(q_logits)
    p = tf.nn.softmax(p_logits)
    kl = tf.reduce_sum(q * (tf.log(q) - tf.log(p)), 1)
  # Guard against division by zero when no position is weighted.
  num_labels = tf.reduce_sum(weights)
  num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)
  # NOTE(review): this rank-2 assertion matches the multiclass branch; for
  # num_classes == 2 with 1-D logits the kl tensor would be rank 1 — confirm
  # callers always pass [*, 1]-shaped logits in the binary case.
  kl.get_shape().assert_has_rank(2)
  weights.get_shape().assert_has_rank(1)
  loss = tf.identity(tf.reduce_sum(tf.expand_dims(weights, -1) * kl) / num_labels, name='kl')
  return loss
def cross_entropy_sequence_loss(logits, targets, sequence_length):
  """Per-example, per-timestep cross-entropy with losses masked past each
  sequence's length.

  Args:
    logits: Logits of shape `[T, B, vocab_size]`
    targets: Target classes of shape `[T, B]`
    sequence_length: An int32 tensor of shape `[B]` corresponding
      to the length of each input
  Returns:
    A tensor of shape [T, B] that contains the loss per example, per time step.
  """
  with tf.name_scope("cross_entropy_sequence_loss"):
    per_step = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets)
    # Mask is [B, T]; transpose to [T, B] to match the loss layout.
    valid = tf.sequence_mask(tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
    return per_step * tf.transpose(tf.to_float(valid), [1, 0])
def dice_loss(predictions, targets, weights=1., name='dice_loss'):
  """Negative (smoothed) Dice coefficient between predictions and targets.

  `weights` acts as the smoothing constant in both numerator and denominator.
  """
  with tf.name_scope(name):
    targets = tf.to_float(targets)
    overlap = 2 * tf.reduce_sum(predictions * targets) + weights
    total = weights + tf.reduce_sum(predictions) + tf.reduce_sum(targets)
    # Negated so that minimizing the loss maximizes the Dice coefficient.
    return -(overlap / total)
def precision_recall_auc_loss(labels,
                              logits,
                              precision_range=(0.0, 1.0),
                              num_anchors=20,
                              weights=1.0,
                              dual_rate_factor=0.1,
                              label_priors=None,
                              surrogate_type='xent',
                              lambdas_initializer=tf.constant_initializer(1.0),
                              reuse=None,
                              variables_collections=None,
                              trainable=True,
                              scope=None):
  """Computes precision-recall AUC loss.

  The loss is based on a sum of losses for recall at a range of
  precision values (anchor points). This sum is a Riemann sum that
  approximates the area under the precision-recall curve.

  The per-example `weights` argument changes not only the coefficients of
  individual training examples, but how the examples are counted toward the
  constraint. If `label_priors` is given, it MUST take `weights` into account.
  That is,
  label_priors = P / (P + N)
  where
  P = sum_i (wt_i on positives)
  N = sum_i (wt_i on negatives).

  Args:
    labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].
    logits: A `Tensor` with the same shape as `labels`.
    precision_range: A length-two tuple, the range of precision values over
      which to compute AUC. The entries must be nonnegative, increasing, and
      less than or equal to 1.0.
    num_anchors: The number of grid points used to approximate the Riemann sum.
    weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape
      [batch_size] or [batch_size, num_labels].
    dual_rate_factor: A floating point value which controls the step size for
      the Lagrange multipliers.
    label_priors: None, or a floating point `Tensor` of shape [num_labels]
      containing the prior probability of each label (i.e. the fraction of the
      training data consisting of positive examples). If None, the label
      priors are computed from `labels` with a moving average. See the notes
      above regarding the interaction with `weights` and do not set this unless
      you have a good reason to do so.
    surrogate_type: Either 'xent' or 'hinge', specifying which upper bound
      should be used for indicator functions.
    lambdas_initializer: An initializer for the Lagrange multipliers.
    reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: Optional list of collections for the variables.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    scope: Optional scope for `variable_scope`.
  Returns:
    loss: A `Tensor` of the same shape as `logits` with the component-wise
      loss.
    other_outputs: A dictionary of useful internal quantities for debugging. For
      more details, see http://arxiv.org/pdf/1608.04802.pdf.
      lambdas: A Tensor of shape [1, num_labels, num_anchors] consisting of the
        Lagrange multipliers.
      biases: A Tensor of shape [1, num_labels, num_anchors] consisting of the
        learned bias term for each.
      label_priors: A Tensor of shape [1, num_labels, 1] consisting of the prior
        probability of each label learned by the loss, if not provided.
      true_positives_lower_bound: Lower bound on the number of true positives
        given `labels` and `logits`. This is the same lower bound which is used
        in the loss expression to be optimized.
      false_positives_upper_bound: Upper bound on the number of false positives
        given `labels` and `logits`. This is the same upper bound which is used
        in the loss expression to be optimized.
  Raises:
    ValueError: If `surrogate_type` is not `xent` or `hinge`.
  """
  with tf.variable_scope(scope, 'precision_recall_auc', [labels, logits, label_priors], reuse=reuse):
    # Normalize inputs to a common 2-D [batch_size, num_labels] layout;
    # original_shape is kept to restore the caller's layout at the end.
    labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
    num_labels = losses_utils.get_num_labels(logits)
    # Convert other inputs to tensors and standardize dtypes.
    dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',
                                                     logits.dtype)
    # Create Tensor of anchor points and distance between anchors.
    precision_values, delta = _range_to_anchors_and_delta(precision_range, num_anchors, logits.dtype)
    # Create lambdas with shape [1, num_labels, num_anchors].
    lambdas, lambdas_variable = _create_dual_variable(
        'lambdas',
        shape=[1, num_labels, num_anchors],
        dtype=logits.dtype,
        initializer=lambdas_initializer,
        collections=variables_collections,
        trainable=trainable,
        dual_rate_factor=dual_rate_factor)
    # Create biases with shape [1, num_labels, num_anchors].
    biases = tf.contrib.framework.model_variable(
        name='biases',
        shape=[1, num_labels, num_anchors],
        dtype=logits.dtype,
        initializer=tf.zeros_initializer(),
        collections=variables_collections,
        trainable=trainable)
    # Maybe create label_priors.
    label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)
    label_priors = tf.reshape(label_priors, [1, num_labels, 1])
    # Expand logits, labels, and weights to shape [batch_size, num_labels, 1].
    logits = tf.expand_dims(logits, 2)
    labels = tf.expand_dims(labels, 2)
    weights = tf.expand_dims(weights, 2)
    # Calculate weighted loss and other outputs. The log(2.0) term corrects for
    # logloss not being an upper bound on the indicator function.
    loss = weights * losses_utils.weighted_surrogate_loss(
        labels,
        logits + biases,
        surrogate_type=surrogate_type,
        positive_weights=1.0 + lambdas * (1.0 - precision_values),
        negative_weights=lambdas * precision_values)
    maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
    maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
    lambda_term = lambdas * (1.0 - precision_values) * label_priors * maybe_log2
    # Lagrangian: surrogate loss minus the multiplier term, then a Riemann
    # sum over anchors (axis 2) with step size `delta`.
    per_anchor_loss = loss - lambda_term
    per_label_loss = delta * tf.reduce_sum(per_anchor_loss, 2)
    # Normalize the AUC such that a perfect score function will have AUC 1.0.
    # Because precision_range is discretized into num_anchors + 1 intervals
    # but only num_anchors terms are included in the Riemann sum, the
    # effective length of the integration interval is `delta` less than the
    # length of precision_range.
    scaled_loss = tf.div(
        per_label_loss, precision_range[1] - precision_range[0] - delta, name='AUC_Normalize')
    scaled_loss = tf.reshape(scaled_loss, original_shape)
    other_outputs = {
        'lambdas':
        lambdas_variable,
        'biases':
        biases,
        'label_priors':
        label_priors,
        'true_positives_lower_bound':
        true_positives_lower_bound(labels, logits, weights, surrogate_type),
        'false_positives_upper_bound':
        false_positives_upper_bound(labels, logits, weights, surrogate_type)
    }
    return scaled_loss, other_outputs
def roc_auc_loss(labels, logits, weights=1.0, surrogate_type='xent', scope=None):
  """Computes ROC AUC loss.

  The area under the ROC curve is the probability p that a randomly chosen
  positive example will be scored higher than a randomly chosen negative
  example. This loss approximates 1-p by using a surrogate (either hinge loss or
  cross entropy) for the indicator function. Specifically, the loss is:

    sum_i sum_j w_i*w_j*loss(logit_i - logit_j)

  where i ranges over the positive datapoints, j ranges over the negative
  datapoints, logit_k denotes the logit (or score) of the k-th datapoint, and
  loss is either the hinge or log loss given a positive label.

  Args:
    labels: A `Tensor` of shape [batch_size] or [batch_size, num_labels].
    logits: A `Tensor` with the same shape and dtype as `labels`.
    weights: Coefficients for the loss. Must be a scalar or `Tensor` of shape
      [batch_size] or [batch_size, num_labels].
    surrogate_type: Either 'xent' or 'hinge', specifying which upper bound
      should be used for the indicator function.
    scope: Optional scope for `name_scope`.
  Returns:
    loss: A `Tensor` of the same shape as `logits` with the component-wise loss.
    other_outputs: An empty dictionary, for consistency.
  Raises:
    ValueError: If `surrogate_type` is not `xent` or `hinge`.
  """
  with tf.name_scope(scope, 'roc_auc', [labels, logits, weights]):
    # Convert inputs to tensors and standardize dtypes.
    labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
    # Create tensors of pairwise differences for logits and labels, and
    # pairwise products of weights. These have shape
    # [batch_size, batch_size, num_labels].
    logits_difference = tf.expand_dims(logits, 0) - tf.expand_dims(logits, 1)
    labels_difference = tf.expand_dims(labels, 0) - tf.expand_dims(labels, 1)
    weights_product = tf.expand_dims(weights, 0) * tf.expand_dims(weights, 1)
    # Positive iff the pair is correctly ranked (positive scored above negative).
    signed_logits_difference = labels_difference * logits_difference
    raw_loss = losses_utils.weighted_surrogate_loss(
        labels=tf.ones_like(signed_logits_difference),
        logits=signed_logits_difference,
        surrogate_type=surrogate_type)
    weighted_loss = weights_product * raw_loss
    # Zero out entries of the loss where labels_difference zero (so loss is only
    # computed on pairs with different labels). The 0.5 compensates for each
    # pair appearing twice (once in each order) in the difference tensors.
    loss = tf.reduce_mean(tf.abs(labels_difference) * weighted_loss, 0) * 0.5
    loss = tf.reshape(loss, original_shape)
    return loss, {}
def recall_at_precision_loss(labels,
                             logits,
                             target_precision,
                             weights=1.0,
                             dual_rate_factor=0.1,
                             label_priors=None,
                             surrogate_type='xent',
                             lambdas_initializer=tf.constant_initializer(1.0),
                             reuse=None,
                             variables_collections=None,
                             trainable=True,
                             scope=None):
    """Builds a Lagrangian surrogate loss for recall at a fixed precision.

    The surrogate has the form
        wt * w(+) * loss(+) + wt * w(-) * loss(-) - c * pi
    with w(+) = 1 + lambdas * (1 - target_precision),
    w(-) = lambdas * target_precision, and
    c = lambdas * (1 - target_precision), where pi is the label prior and
    lambdas are learned Lagrange multipliers. Per-example `weights` scale both
    the losses and how examples count toward the constraint; if `label_priors`
    is supplied it MUST already account for `weights`.

    Args:
        labels: `Tensor` of shape [batch_size] or [batch_size, num_labels].
        logits: `Tensor` with the same shape as `labels`.
        target_precision: Float in [0, 1] or `Tensor` of shape [num_labels]
            giving the precision(s) to constrain to.
        weights: Scalar or `Tensor` of shape [batch_size] or
            [batch_size, num_labels] of per-example coefficients.
        dual_rate_factor: Float controlling the step size of the Lagrange
            multipliers.
        label_priors: Optional [num_labels] `Tensor` of positive-class priors;
            estimated from `labels` with a moving average when None.
        surrogate_type: 'xent' or 'hinge' upper bound on the indicator.
        lambdas_initializer: Initializer for the Lagrange multipliers.
        reuse: Whether to reuse the layer's variables (requires `scope`).
        variables_collections: Optional collections for created variables.
        trainable: If True, created variables are added to
            `GraphKeys.TRAINABLE_VARIABLES`.
        scope: Optional `variable_scope` name.

    Returns:
        A (loss, other_outputs) pair: `loss` has the same shape as `logits`,
        and `other_outputs` maps 'lambdas', 'label_priors',
        'true_positives_lower_bound' and 'false_positives_upper_bound' to the
        corresponding debugging tensors (see arXiv:1608.04802).

    Raises:
        ValueError: If `logits` and `labels` do not have the same shape.
    """
    with tf.variable_scope(scope, 'recall_at_precision', [logits, labels, label_priors], reuse=reuse):
        labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
        num_labels = losses_utils.get_num_labels(logits)

        # Standardize the remaining inputs on the logits dtype.
        target_precision = losses_utils.convert_and_cast(
            target_precision, 'target_precision', logits.dtype)
        dual_rate_factor = losses_utils.convert_and_cast(
            dual_rate_factor, 'dual_rate_factor', logits.dtype)

        # One Lagrange multiplier per label.
        lambdas, lambdas_variable = _create_dual_variable(
            'lambdas',
            shape=[num_labels],
            dtype=logits.dtype,
            initializer=lambdas_initializer,
            collections=variables_collections,
            trainable=trainable,
            dual_rate_factor=dual_rate_factor)
        label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)

        # Surrogate term; log(2) rescales logloss so it upper-bounds the
        # indicator function.
        surrogate = weights * losses_utils.weighted_surrogate_loss(
            labels,
            logits,
            surrogate_type=surrogate_type,
            positive_weights=1.0 + lambdas * (1.0 - target_precision),
            negative_weights=lambdas * target_precision)
        xent_correction = tf.log(2.0) if surrogate_type == 'xent' else 1.0
        xent_correction = tf.cast(xent_correction, logits.dtype.base_dtype)
        penalty = lambdas * (1.0 - target_precision) * label_priors * xent_correction
        loss = tf.reshape(surrogate - penalty, original_shape)
        other_outputs = {
            'lambdas': lambdas_variable,
            'label_priors': label_priors,
            'true_positives_lower_bound':
                true_positives_lower_bound(labels, logits, weights, surrogate_type),
            'false_positives_upper_bound':
                false_positives_upper_bound(labels, logits, weights, surrogate_type),
        }
        return loss, other_outputs
def precision_at_recall_loss(labels,
                             logits,
                             target_recall,
                             weights=1.0,
                             dual_rate_factor=0.1,
                             label_priors=None,
                             surrogate_type='xent',
                             lambdas_initializer=tf.constant_initializer(1.0),
                             reuse=None,
                             variables_collections=None,
                             trainable=True,
                             scope=None):
    """Builds a Lagrangian surrogate loss for precision at a fixed recall.

    The surrogate has the form
        wt * loss(-) + lambdas * (pi * (b - 1) + wt * loss(+))
    where b is `target_recall`, pi is the label prior, wt are the per-example
    weights, and lambdas are learned Lagrange multipliers. Per-example
    `weights` scale both the losses and how examples count toward the
    constraint; if `label_priors` is supplied it MUST already account for
    `weights`.

    Args:
        labels: `Tensor` of shape [batch_size] or [batch_size, num_labels].
        logits: `Tensor` with the same shape as `labels`.
        target_recall: Float in [0, 1] or `Tensor` of shape [num_labels]
            giving the recall(s) to constrain to.
        weights: Scalar or `Tensor` of shape [batch_size] or
            [batch_size, num_labels] of per-example coefficients.
        dual_rate_factor: Float controlling the step size of the Lagrange
            multipliers.
        label_priors: Optional [num_labels] `Tensor` of positive-class priors;
            estimated from `labels` with a moving average when None.
        surrogate_type: 'xent' or 'hinge' upper bound on the indicator.
        lambdas_initializer: Initializer for the Lagrange multipliers.
        reuse: Whether to reuse the layer's variables (requires `scope`).
        variables_collections: Optional collections for created variables.
        trainable: If True, created variables are added to
            `GraphKeys.TRAINABLE_VARIABLES`.
        scope: Optional `variable_scope` name.

    Returns:
        A (loss, other_outputs) pair: `loss` has the same shape as `logits`,
        and `other_outputs` maps 'lambdas', 'label_priors',
        'true_positives_lower_bound' and 'false_positives_upper_bound' to the
        corresponding debugging tensors (see arXiv:1608.04802).
    """
    with tf.variable_scope(scope, 'precision_at_recall', [logits, labels, label_priors], reuse=reuse):
        labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
        num_labels = losses_utils.get_num_labels(logits)

        # Standardize the remaining inputs on the logits dtype.
        target_recall = losses_utils.convert_and_cast(target_recall, 'target_recall', logits.dtype)
        dual_rate_factor = losses_utils.convert_and_cast(
            dual_rate_factor, 'dual_rate_factor', logits.dtype)

        # One Lagrange multiplier per label.
        lambdas, lambdas_variable = _create_dual_variable(
            'lambdas',
            shape=[num_labels],
            dtype=logits.dtype,
            initializer=lambdas_initializer,
            collections=variables_collections,
            trainable=trainable,
            dual_rate_factor=dual_rate_factor)
        label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)

        # Surrogate term; log(2) rescales logloss so it upper-bounds the
        # indicator function.
        surrogate = weights * losses_utils.weighted_surrogate_loss(
            labels, logits, surrogate_type,
            positive_weights=lambdas,
            negative_weights=1.0)
        xent_correction = tf.log(2.0) if surrogate_type == 'xent' else 1.0
        xent_correction = tf.cast(xent_correction, logits.dtype.base_dtype)
        penalty = lambdas * label_priors * (target_recall - 1.0) * xent_correction
        loss = tf.reshape(surrogate + penalty, original_shape)
        other_outputs = {
            'lambdas': lambdas_variable,
            'label_priors': label_priors,
            'true_positives_lower_bound':
                true_positives_lower_bound(labels, logits, weights, surrogate_type),
            'false_positives_upper_bound':
                false_positives_upper_bound(labels, logits, weights, surrogate_type),
        }
        return loss, other_outputs
def false_positive_rate_at_true_positive_rate_loss(labels,
                                                   logits,
                                                   target_rate,
                                                   weights=1.0,
                                                   dual_rate_factor=0.1,
                                                   label_priors=None,
                                                   surrogate_type='xent',
                                                   lambdas_initializer=tf.constant_initializer(1.0),
                                                   reuse=None,
                                                   variables_collections=None,
                                                   trainable=True,
                                                   scope=None):
    """Computes false positive rate at true positive rate loss.

    True positive rate is a synonym for recall, and for a fixed recall,
    minimizing the false positive rate is equivalent to maximizing precision.
    This function therefore simply delegates to `precision_at_recall_loss`,
    passing `target_rate` as the target recall.

    Args:
        labels: `Tensor` of shape [batch_size] or [batch_size, num_labels].
        logits: `Tensor` with the same shape as `labels`.
        target_rate: Float in [0, 1] or `Tensor` of shape [num_labels] giving
            the true positive rate(s) to constrain to.
        weights: Scalar or `Tensor` of shape [batch_size] or
            [batch_size, num_labels] of per-example coefficients. These scale
            both the losses and how examples count toward the constraint; if
            `label_priors` is supplied it MUST already account for them.
        dual_rate_factor: Float controlling the step size of the Lagrange
            multipliers.
        label_priors: Optional [num_labels] `Tensor` of positive-class priors;
            estimated from `labels` with a moving average when None.
        surrogate_type: 'xent' (cross-entropy) or 'hinge' indicator bound.
        lambdas_initializer: Initializer op for the Lagrange multipliers.
        reuse: Whether to reuse the layer's variables (requires `scope`).
        variables_collections: Optional collections for the variables.
        trainable: If True, created variables are added to
            `GraphKeys.TRAINABLE_VARIABLES`.
        scope: Optional `variable_scope` name.

    Returns:
        A (loss, other_outputs) pair exactly as produced by
        `precision_at_recall_loss`.

    Raises:
        ValueError: If `surrogate_type` is not `xent` or `hinge`.
    """
    # FPR@TPR reduces to precision@recall; forward every argument unchanged.
    delegated_kwargs = dict(
        weights=weights,
        dual_rate_factor=dual_rate_factor,
        label_priors=label_priors,
        surrogate_type=surrogate_type,
        lambdas_initializer=lambdas_initializer,
        reuse=reuse,
        variables_collections=variables_collections,
        trainable=trainable,
        scope=scope)
    return precision_at_recall_loss(labels, logits, target_rate, **delegated_kwargs)
def true_positive_rate_at_false_positive_rate_loss(labels,
                                                   logits,
                                                   target_rate,
                                                   weights=1.0,
                                                   dual_rate_factor=0.1,
                                                   label_priors=None,
                                                   surrogate_type='xent',
                                                   lambdas_initializer=tf.constant_initializer(1.0),
                                                   reuse=None,
                                                   variables_collections=None,
                                                   trainable=True,
                                                   scope=None):
    """Builds a Lagrangian surrogate loss for TPR at a fixed FPR.

    The surrogate has the form
        wt * loss(+) + lambdas * (wt * loss(-) - r * (1 - pi))
    where r is `target_rate`, pi is the label prior, wt are the per-example
    weights, and lambdas are learned Lagrange multipliers. Per-example
    `weights` scale both the losses and how examples count toward the
    constraint; if `label_priors` is supplied it MUST already account for
    `weights`.

    Args:
        labels: `Tensor` of shape [batch_size] or [batch_size, num_labels].
        logits: `Tensor` with the same shape as `labels`.
        target_rate: Float in [0, 1] or `Tensor` of shape [num_labels] giving
            the false positive rate(s) to constrain to.
        weights: Scalar or `Tensor` of shape [batch_size] or
            [batch_size, num_labels] of per-example coefficients.
        dual_rate_factor: Float controlling the step size of the Lagrange
            multipliers.
        label_priors: Optional [num_labels] `Tensor` of positive-class priors;
            estimated from `labels` with a moving average when None.
        surrogate_type: 'xent' (cross-entropy) or 'hinge' indicator bound.
        lambdas_initializer: Initializer op for the Lagrange multipliers.
        reuse: Whether to reuse the layer's variables (requires `scope`).
        variables_collections: Optional collections for created variables.
        trainable: If True, created variables are added to
            `GraphKeys.TRAINABLE_VARIABLES`.
        scope: Optional `variable_scope` name.

    Returns:
        A (loss, other_outputs) pair: `loss` has the same shape as `logits`,
        and `other_outputs` maps 'lambdas', 'label_priors',
        'true_positives_lower_bound' and 'false_positives_upper_bound' to the
        corresponding debugging tensors (see arXiv:1608.04802).

    Raises:
        ValueError: If `surrogate_type` is not `xent` or `hinge`.
    """
    with tf.variable_scope(scope, 'tpr_at_fpr', [labels, logits, label_priors], reuse=reuse):
        labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
        num_labels = losses_utils.get_num_labels(logits)

        # Standardize the remaining inputs on the logits dtype.
        target_rate = losses_utils.convert_and_cast(target_rate, 'target_rate', logits.dtype)
        dual_rate_factor = losses_utils.convert_and_cast(
            dual_rate_factor, 'dual_rate_factor', logits.dtype)

        # One Lagrange multiplier per label.
        lambdas, lambdas_variable = _create_dual_variable(
            'lambdas',
            shape=[num_labels],
            dtype=logits.dtype,
            initializer=lambdas_initializer,
            collections=variables_collections,
            trainable=trainable,
            dual_rate_factor=dual_rate_factor)
        label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)

        # Surrogate term; log(2) rescales logloss so it upper-bounds the
        # indicator function.
        surrogate = weights * losses_utils.weighted_surrogate_loss(
            labels,
            logits,
            surrogate_type=surrogate_type,
            positive_weights=1.0,
            negative_weights=lambdas)
        xent_correction = tf.log(2.0) if surrogate_type == 'xent' else 1.0
        xent_correction = tf.cast(xent_correction, logits.dtype.base_dtype)
        penalty = lambdas * target_rate * (1.0 - label_priors) * xent_correction
        loss = tf.reshape(surrogate - penalty, original_shape)
        other_outputs = {
            'lambdas': lambdas_variable,
            'label_priors': label_priors,
            'true_positives_lower_bound':
                true_positives_lower_bound(labels, logits, weights, surrogate_type),
            'false_positives_upper_bound':
                false_positives_upper_bound(labels, logits, weights, surrogate_type),
        }
        return loss, other_outputs
def _prepare_labels_logits_weights(labels, logits, weights):
    """Converts inputs to tensors, checks shapes, and reshapes them to 2-D.

    Args:
        labels: `Tensor` of shape [batch_size] or [batch_size, num_labels].
        logits: `Tensor` with the same shape as `labels`.
        weights: `None` or a `Tensor` with shape broadcastable to `logits`.

    Returns:
        A (labels, logits, weights, original_shape) tuple: the first three are
        rank-2 tensors sharing the logits dtype, and `original_shape` records
        the pre-reshape shape of `labels`/`logits` with the leading dimension
        replaced by -1.

    Raises:
        ValueError: If `labels` and `logits` do not have the same shape.
    """
    # Standardize all three inputs on the logits dtype.
    logits = tf.convert_to_tensor(logits, name='logits')
    labels = losses_utils.convert_and_cast(labels, 'labels', logits.dtype.base_dtype)
    weights = losses_utils.convert_and_cast(weights, 'weights', logits.dtype.base_dtype)

    try:
        labels.get_shape().merge_with(logits.get_shape())
    except ValueError:
        raise ValueError('logits and labels must have the same shape (%s vs %s)' %
                         (logits.get_shape(), labels.get_shape()))

    original_shape = labels.get_shape().as_list()
    label_ndims = labels.get_shape().ndims
    if label_ndims > 0:
        original_shape[0] = -1
    if label_ndims <= 1:
        # Promote rank-0/1 labels and logits to [batch_size, 1].
        labels = tf.reshape(labels, [-1, 1])
        logits = tf.reshape(logits, [-1, 1])

    weight_ndims = weights.get_shape().ndims
    if weight_ndims == 1:
        # [batch_size] -> [batch_size, 1] so weights line up with 2-D logits.
        weights = tf.reshape(weights, [-1, 1])
    elif weight_ndims == 0:
        # Scalar weight: broadcast explicitly to the shape of logits.
        weights = weights * tf.ones_like(logits)
    return labels, logits, weights, original_shape
def _range_to_anchors_and_delta(precision_range, num_anchors, dtype):
    """Computes equally spaced precision anchor points from a precision range.

    Args:
        precision_range: Length-1 or length-2 sequence of precision bounds,
            as required in precision_recall_auc_loss.
        num_anchors: int, number of equally spaced anchor points.
        dtype: Data type of the returned tensors.

    Returns:
        A (precision_values, delta) pair: a [1, 1, num_anchors] tensor of
        anchor precisions, and the scalar spacing between them.

    Raises:
        ValueError: If precision_range is invalid.
    """
    lower = precision_range[0]
    upper = precision_range[-1]
    if not 0 <= lower <= upper <= 1:
        raise ValueError(
            'precision values must obey 0 <= %f <= %f <= 1' % (lower, upper))
    if not 0 < len(precision_range) < 3:
        raise ValueError('length of precision_range (%d) must be 1 or 2' % len(precision_range))

    # Interior points of a uniform grid over the range; both endpoints dropped.
    values = np.linspace(start=precision_range[0], stop=precision_range[1], num=num_anchors + 2)[1:-1]
    precision_values = losses_utils.convert_and_cast(values, 'precision_values', dtype)
    delta = losses_utils.convert_and_cast(values[0] - precision_range[0], 'delta', dtype)
    # Reshape to [1, 1, num_anchors] for broadcasting against
    # [batch, labels, anchors] tensors.
    precision_values = losses_utils.expand_outer(precision_values, 3)
    return precision_values, delta
def _create_dual_variable(name, shape, dtype, initializer, collections, trainable, dual_rate_factor):
    """Creates a nonnegative dual (Lagrange) variable with a reversed gradient.

    Dual variables must be maximized rather than minimized, so when trainable
    the returned value's gradient is reversed and scaled by -dual_rate_factor;
    nonnegativity is enforced by taking the absolute value.

    Args:
        name: A string, the name for the new variable.
        shape: Shape of the new variable.
        dtype: Data type for the new variable.
        initializer: Initializer for the new variable.
        collections: List of graph collection keys to add the variable to;
            defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
        trainable: If True, also adds the variable to
            `GraphKeys.TRAINABLE_VARIABLES`.
        dual_rate_factor: Float or `Tensor` scaling the dual learning rate.

    Returns:
        A (dual_value, dual_variable) pair: the abs-valued, gradient-reversed
        tensor, and the raw variable underneath it.
    """
    # Dual variables are updated with assign, which partitioned variables do
    # not support, so partitioning is disabled while creating them.
    saved_partitioner = tf.get_variable_scope().partitioner
    try:
        tf.get_variable_scope().set_partitioner(None)
        dual_variable = tf.contrib.framework.model_variable(
            name=name,
            shape=shape,
            dtype=dtype,
            initializer=initializer,
            collections=collections,
            trainable=trainable)
    finally:
        tf.get_variable_scope().set_partitioner(saved_partitioner)

    # abs() keeps the dual value nonnegative regardless of the raw variable.
    dual_value = tf.abs(dual_variable)
    if trainable:
        # stop_gradient trick: the forward value is unchanged, but the
        # gradient w.r.t. the variable becomes -dual_rate_factor, reversing
        # and rescaling the optimizer's update direction.
        dual_value = (tf.stop_gradient((1.0 + dual_rate_factor) * dual_value)
                      - dual_rate_factor * dual_value)
    return dual_value, dual_variable
def maybe_create_label_priors(label_priors, labels, weights, variables_collections):
    """Returns label priors, estimating them with a moving average if absent.

    Args:
        label_priors: Optional priors, as required in e.g.
            precision_recall_auc_loss.
        labels: `Tensor` of shape [batch_size] or [batch_size, num_labels].
        weights: Per-example coefficients, as in precision_recall_auc_loss.
        variables_collections: Optional collections for any variables created.

    Returns:
        A [num_labels] tensor of weighted label priors; when estimated from
        `labels`, it is backed by moving average update ops.
    """
    if label_priors is None:
        # No priors given: estimate them from the observed (weighted) labels.
        return losses_utils.build_label_priors(
            labels, weights, variables_collections=variables_collections)
    label_priors = losses_utils.convert_and_cast(
        label_priors, name='label_priors', dtype=labels.dtype.base_dtype)
    return tf.squeeze(label_priors)
def true_positives_lower_bound(labels, logits, weights, surrogate_type):
    """Computes a lower bound on the weighted number of true positives.

    This is the same lower bound used inside the global objectives losses.

    Args:
        labels: `Tensor` of shape [batch_size] or [batch_size, num_labels].
        logits: `Tensor` of shape [batch_size, num_labels] or
            [batch_size, num_labels, num_anchors]; with a third dimension the
            bound is computed on each anchor slice independently.
        weights: Per-example coefficients broadcast-compatible with `labels`.
        surrogate_type: 'xent' or 'hinge' upper bound on the indicator.

    Returns:
        A `Tensor` of shape [num_labels] or [num_labels, num_anchors].
    """
    # log(2) rescales logloss into an upper bound on the indicator.
    indicator_scale = tf.log(2.0) if surrogate_type == 'xent' else 1.0
    indicator_scale = tf.cast(indicator_scale, logits.dtype.base_dtype)
    if logits.get_shape().ndims == 3 and labels.get_shape().ndims < 3:
        # Broadcast labels across the anchor dimension.
        labels = tf.expand_dims(labels, 2)
    # Surrogate restricted to the positives; labels minus this loss bounds the
    # per-example true-positive indicator from below.
    positive_loss = losses_utils.weighted_surrogate_loss(
        labels, logits, surrogate_type, negative_weights=0.0) / indicator_scale
    return tf.reduce_sum(weights * (labels - positive_loss), 0)
def false_positives_upper_bound(labels, logits, weights, surrogate_type):
    """Computes an upper bound on the weighted number of false positives.

    This is the same upper bound used inside the global objectives losses.

    Args:
        labels: `Tensor` of shape [batch_size, num_labels].
        logits: `Tensor` of shape [batch_size, num_labels] or
            [batch_size, num_labels, num_anchors]; with a third dimension the
            bound is computed on each anchor slice independently.
        weights: Per-example coefficients broadcast-compatible with `labels`.
        surrogate_type: 'xent' or 'hinge' upper bound on the indicator.

    Returns:
        A `Tensor` of shape [num_labels] or [num_labels, num_anchors].
    """
    # log(2) rescales logloss into an upper bound on the indicator.
    indicator_scale = tf.log(2.0) if surrogate_type == 'xent' else 1.0
    indicator_scale = tf.cast(indicator_scale, logits.dtype.base_dtype)
    # Surrogate restricted to the negatives bounds the per-example
    # false-positive indicator from above.
    negative_loss = losses_utils.weighted_surrogate_loss(
        labels, logits, surrogate_type, positive_weights=0.0) / indicator_scale
    return tf.reduce_sum(weights * negative_loss, 0)
| 44.516271 | 101 | 0.681865 |
import numpy as np
import tensorflow as tf
import numbers
from functools import partial
from ..utils import util
from .layers import flatten, fully_connected as fc, relu
from .layers import gradient_reverse
from ..utils import losses_utils
log_loss = tf.losses.log_loss
def log_loss_custom(predictions, labels, eps=1e-7, name='log'):
    """Computes -mean(labels * log(predictions)) with clipped predictions.

    Note: only the positive-label term is included; there is no
    (1 - labels) * log(1 - predictions) term (compare `log_loss_tf`).

    Args:
        predictions: Tensor of predicted probabilities, same shape as `labels`.
        labels: Tensor of target values.
        eps: Clipping bound keeping predictions inside (eps, 1 - eps).
        name: Name for the op scope.

    Returns:
        A scalar loss tensor.
    """
    with tf.name_scope(name):
        predictions = tf.to_float(predictions)
        labels = tf.to_float(labels)
        clipped = tf.clip_by_value(predictions, eps, 1 - eps)
        clipped.get_shape().assert_is_compatible_with(labels.get_shape())
        return -tf.reduce_mean(labels * tf.log(clipped))
def log_loss_tf(predictions, labels, eps=1e-7, weights=1.0, name='log_loss'):
    """Computes weighted binary cross-entropy with epsilon-smoothed logs.

    Args:
        predictions: Tensor of predicted probabilities, same shape as `labels`.
        labels: Tensor of target values.
        eps: Small constant added inside each log for numerical stability.
        weights: Scalar or tensor of loss weights, as accepted by
            `tf.losses.compute_weighted_loss`.
        name: Name for the op scope.

    Returns:
        A scalar weighted-loss tensor.
    """
    with tf.name_scope(name):
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())
        predictions = tf.to_float(predictions)
        labels = tf.to_float(labels)
        positive_term = tf.multiply(labels, tf.log(predictions + eps))
        negative_term = tf.multiply((1 - labels), tf.log(1 - predictions + eps))
        per_element = -positive_term - negative_term
        return tf.losses.compute_weighted_loss(per_element, weights)
def kappa_loss(predictions, labels, y_pow=1, eps=1e-15, num_ratings=5, batch_size=32, name='kappa'):
    """Differentiable negative quadratic-weighted-kappa loss.

    Returns -(1 - nom/denom), i.e. the negated kappa-style agreement so that
    minimizing the loss maximizes agreement between `predictions` and `labels`.

    Args:
        predictions: Predicted class scores; assumes shape
            [batch_size, num_ratings] of probabilities — TODO confirm against
            callers.
        labels: Targets; presumably one-hot of shape [batch_size, num_ratings]
            — verify against callers.
        y_pow: Exponent applied to predictions before row normalization.
        eps: Small constant guarding the divisions.
        num_ratings: Number of rating classes (size of the kappa weight matrix).
        batch_size: Fallback batch size used when the dynamic -1 reshape fails;
            also divides the expected-agreement denominator.
        name: Name for the op scope.

    Returns:
        A scalar loss tensor in graph mode.
    """
    with tf.name_scope(name):
        labels = tf.to_float(labels)
        # Quadratic disagreement weights w[i, j] = (i - j)^2 / (num_ratings - 1)^2.
        repeat_op = tf.to_float(
            tf.tile(tf.reshape(tf.range(0, num_ratings), [num_ratings, 1]), [1, num_ratings]))
        repeat_op_sq = tf.square((repeat_op - tf.transpose(repeat_op)))
        weights = repeat_op_sq / tf.to_float((num_ratings - 1)**2)
        # Sharpen (y_pow > 1) and row-normalize predictions; the except branch
        # falls back to a static batch_size when the -1 reshape is rejected.
        pred_ = predictions**y_pow
        try:
            pred_norm = pred_ / \
                (eps + tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1]))
        except Exception:
            pred_norm = pred_ / \
                (eps + tf.reshape(tf.reduce_sum(pred_, 1), [batch_size, 1]))
        # Marginal histograms of the two "raters" (model vs. ground truth).
        hist_rater_a = tf.reduce_sum(pred_norm, 0)
        hist_rater_b = tf.reduce_sum(labels, 0)
        # Confusion matrix between normalized predictions and labels.
        conf_mat = tf.matmul(tf.transpose(pred_norm), labels)
        # Observed vs. expected weighted disagreement (kappa numerator/denominator).
        nom = tf.reduce_sum(weights * conf_mat)
        denom = tf.reduce_sum(weights * tf.matmul(
            tf.reshape(hist_rater_a, [num_ratings, 1]), tf.reshape(hist_rater_b, [1, num_ratings])) /
            tf.to_float(batch_size))
        # NOTE(review): denom is a tensor, so a zero denominator does not raise
        # at graph-construction time — the except branch likely never triggers
        # for division-by-zero; confirm intent.
        try:
            return -(1 - nom / denom)
        except Exception:
            return -(1 - nom / (denom + eps))
def kappa_log_loss(predictions,
                   labels,
                   label_smoothing=0.0,
                   y_pow=1,
                   batch_size=32,
                   log_scale=0.5,
                   num_classes=5,
                   log_offset=0.50,
                   name='kappa_log'):
    """Computes kappa loss blended with a scaled, offset log loss.

    The result is kappa_loss + log_scale * (log_loss - log_offset), with
    optional label smoothing applied to `labels` first. `num_classes` is
    re-derived from the last dimension of `labels`.

    Args:
        predictions: Tensor of predicted class probabilities.
        labels: Target tensor whose last dimension is the class count.
        label_smoothing: If > 0, smooth labels toward the uniform distribution.
        y_pow: Exponent forwarded to `kappa_loss`.
        batch_size: Batch size forwarded to `kappa_loss`.
        log_scale: Multiplier on the (offset) log-loss term.
        num_classes: Unused default; overwritten from the label shape.
        log_offset: Constant subtracted from the log loss before scaling.
        name: Name for the op scope.

    Returns:
        A scalar loss tensor.
    """
    with tf.name_scope(name):
        num_classes = labels.get_shape()[-1].value
        labels = tf.cast(labels, predictions.dtype)
        if label_smoothing > 0:
            # Standard label smoothing: pull targets toward uniform.
            labels = labels * (1.0 - label_smoothing) + label_smoothing / num_classes
        xent_term = log_loss(predictions, labels)
        kappa_term = kappa_loss(
            predictions, labels, y_pow=y_pow, num_ratings=num_classes, batch_size=batch_size)
        return kappa_term + log_scale * (xent_term - log_offset)
def kappa_log_loss_clipped(predictions,
                           labels,
                           label_smoothing=0.0,
                           y_pow=1,
                           batch_size=32,
                           log_scale=0.5,
                           log_cutoff=0.80,
                           num_classes=5,
                           name='kappa_log_clipped'):
    """Computes kappa loss plus a scaled log loss clipped from below.

    The result is kappa_loss + log_scale * clip(log_loss_tf, log_cutoff, 1e3),
    with optional label smoothing applied to `labels` first. `num_classes` is
    re-derived from the last dimension of `labels`.

    Args:
        predictions: Tensor of predicted class probabilities.
        labels: Target tensor whose last dimension is the class count.
        label_smoothing: If > 0, smooth labels toward the uniform distribution.
        y_pow: Exponent forwarded to `kappa_loss`.
        batch_size: Batch size forwarded to `kappa_loss`.
        log_scale: Multiplier on the clipped log-loss term.
        log_cutoff: Lower clipping bound for the log loss.
        num_classes: Unused default; overwritten from the label shape.
        name: Name for the op scope.

    Returns:
        A scalar loss tensor.
    """
    with tf.name_scope(name):
        num_classes = labels.get_shape()[-1].value
        labels = tf.cast(labels, predictions.dtype)
        if label_smoothing > 0:
            # Standard label smoothing: pull targets toward uniform.
            labels = labels * (1.0 - label_smoothing) + label_smoothing / num_classes
        xent_term = log_loss_tf(predictions, labels)
        kappa_term = kappa_loss(
            predictions, labels, y_pow=y_pow, num_ratings=num_classes, batch_size=batch_size)
        return kappa_term + log_scale * tf.clip_by_value(xent_term, log_cutoff, 10**3)
def cross_entropy_loss(logits, labels, label_smoothing=0.0, weight=1.0, name='cross_entropy_loss'):
    """Softmax cross-entropy with optional label smoothing and a scalar weight.

    Args:
        logits: Unscaled class scores, same shape as `labels`.
        labels: Target tensor whose last dimension is the class count.
        label_smoothing: If > 0, smooth labels toward the uniform distribution.
        weight: Scalar multiplier applied to the mean cross-entropy.
        name: Name for the op scope.

    Returns:
        A scalar loss tensor named 'value'.
    """
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    with tf.name_scope(name):
        num_classes = labels.get_shape()[-1].value
        labels = tf.cast(labels, logits.dtype)
        if label_smoothing > 0:
            # Standard label smoothing: pull targets toward uniform.
            labels = labels * (1.0 - label_smoothing) + label_smoothing / num_classes
        xent = tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy')
        weight = tf.convert_to_tensor(weight, dtype=logits.dtype.base_dtype, name='loss_weight')
        return tf.multiply(weight, tf.reduce_mean(xent), name='value')
def l1_l2_regularizer(var, weight_l1=1.0, weight_l2=1.0, name='l1_l2_regularizer'):
    """Combined elastic-net penalty: weight_l1*sum|var| + weight_l2*l2_loss(var).

    Args:
        var: Tensor to regularize.
        weight_l1: Coefficient of the L1 term.
        weight_l2: Coefficient of the L2 term.
        name: Name for the op scope.

    Returns:
        A scalar regularization tensor named 'value'.
    """
    with tf.name_scope(name):
        l1_coeff = tf.convert_to_tensor(weight_l1, dtype=var.dtype.base_dtype, name='weight_l1')
        l2_coeff = tf.convert_to_tensor(weight_l2, dtype=var.dtype.base_dtype, name='weight_l2')
        l1_term = tf.multiply(l1_coeff, tf.reduce_sum(tf.abs(var)), name='value_l1')
        l2_term = tf.multiply(l2_coeff, tf.nn.l2_loss(var), name='value_l2')
        return tf.add(l1_term, l2_term, name='value')
def l1_regularizer(scale, name='l1_regularizer'):
    """Returns a function computing the L1 penalty scale * sum(|weights|).

    Args:
        scale: Nonnegative real scale; integers are rejected to catch
            accidentally untyped scales.
        name: Default name scope for the returned function (unused here; the
            inner function takes its own `name`).

    Returns:
        A function mapping a weights tensor to its scaled L1 penalty, or a
        function returning None when scale is exactly 0.

    Raises:
        ValueError: If `scale` is an integer or negative.
    """
    if isinstance(scale, numbers.Integral):
        raise ValueError('scale cannot be an integer: %s' % scale)
    if isinstance(scale, numbers.Real):
        if scale < 0.:
            raise ValueError('Setting a scale less than 0 on a regularizer: %g' % scale)
        if scale == 0.:
            # A zero scale disables regularization entirely.
            return lambda _: None

    def l1(weights, name='l1_regularizer'):
        """Applies the captured L1 scale to `weights`."""
        with tf.name_scope(name):
            scale_tensor = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
            return tf.multiply(scale_tensor, tf.reduce_sum(tf.abs(weights)), name=name)

    return l1
def l2_regularizer(scale, name='l2_regularizer'):
  """Returns a function that applies an L2 penalty scaled by `scale`.

  A zero scale returns a no-op regularizer (it yields None).

  Raises:
    ValueError: if `scale` is an integer or negative.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError('scale cannot be an integer: %s' % (scale,))
  if isinstance(scale, numbers.Real):
    if scale < 0.:
      raise ValueError('Setting a scale less than 0 on a regularizer: %g.' % scale)
    if scale == 0.:
      # Zero scale disables regularization entirely.
      return lambda _: None

  def l2(weights, name='l2_regularizer'):
    """Applies the L2 penalty to `weights`."""
    with tf.name_scope(name):
      my_scale = tf.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name='scale')
      # BUG FIX: the original called `nn.l2_loss`, an undefined name in this
      # module; use tf.nn.l2_loss, matching l1_l2_regularizer above.
      return tf.multiply(my_scale, tf.nn.l2_loss(weights), name=name)

  return l2
def discretized_mix_logistic_loss(inputs,
                                  predictions,
                                  sum_all=True,
                                  name='disretized_mix_logistic_loss'):
  """Negative log-likelihood of `inputs` under a discretized mixture of
  logistic distributions (PixelCNN++-style).

  NOTE(review): the default scope name keeps the original 'disretized'
  typo; renaming it would change graph node names.

  Args:
    inputs: image tensor; the channel indexing below assumes exactly 3
      channels with values scaled to [-1, 1] -- TODO confirm with callers.
    predictions: per-pixel mixture parameters whose last dimension must be
      10 * nr_mix (mixture logits, then means / log-scales / coupling
      coefficients per component).
    sum_all: if True, return a scalar summed over the whole batch;
      otherwise return one value per example (summed over space).
    name: op scope name.

  Returns:
    The negative log-likelihood (scalar, or per-example tensor).
  """
  with tf.name_scope(name):
    inputs_shape = list(map(int, inputs.get_shape()))
    predictions_shape = list(map(int, predictions.get_shape()))
    # Each mixture component carries 10 parameters per pixel.
    nr_mix = int(predictions_shape[-1] / 10)
    logit_probs = predictions[:, :, :, :nr_mix]
    predictions = tf.reshape(predictions[:, :, :, nr_mix:], inputs_shape + [nr_mix * 3])
    means = predictions[:, :, :, :, :nr_mix]
    # Clamp log-scales from below for numerical stability.
    log_scales = tf.maximum(predictions[:, :, :, :, nr_mix:2 * nr_mix], -7.)
    coeffs = tf.nn.tanh(predictions[:, :, :, :, 2 * nr_mix:3 * nr_mix])
    # Broadcast the inputs across mixture components.
    inputs = tf.reshape(inputs, inputs_shape + [1]) + tf.zeros(inputs_shape + [nr_mix])
    # Means of channels 2 and 3 are conditioned linearly on earlier channels.
    m2 = tf.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * inputs[:, :, :, 0, :],
                    [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])
    m3 = tf.reshape(
        means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * inputs[:, :, :, 0, :] +
        coeffs[:, :, :, 2, :] * inputs[:, :, :, 1, :],
        [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix])
    means = tf.concat([
        tf.reshape(means[:, :, :, 0, :],
                   [inputs_shape[0], inputs_shape[1], inputs_shape[2], 1, nr_mix]), m2, m3
    ],
                      axis=3)
    centered_inputs = inputs - means
    inv_stdv = tf.exp(-log_scales)
    # Logistic CDF at the upper/lower edge of each pixel bin
    # (bin half-width 1/255 in [-1, 1] space).
    plus_in = inv_stdv * (centered_inputs + 1. / 255.)
    cdf_plus = tf.nn.sigmoid(plus_in)
    min_in = inv_stdv * (centered_inputs - 1. / 255.)
    cdf_min = tf.nn.sigmoid(min_in)
    # log sigmoid(x) = x - softplus(x); log (1 - sigmoid(x)) = -softplus(x).
    log_cdf_plus = plus_in - tf.nn.softplus(plus_in)
    log_one_minus_cdf_min = -tf.nn.softplus(min_in)
    cdf_delta = cdf_plus - cdf_min
    mid_in = inv_stdv * centered_inputs
    # Log of the logistic density at the bin center, used when the bin
    # probability underflows.
    log_pdf_mid = mid_in - log_scales - 2. * tf.nn.softplus(mid_in)
    # Edge pixels integrate the full tail; very small cdf_delta falls back to
    # the density approximation.  NOTE(review): tf.select is the pre-TF-1.0
    # name of tf.where -- this code requires an old TensorFlow release.
    log_probs = tf.select(
        inputs < -0.999, log_cdf_plus,
        tf.select(
            inputs > 0.999, log_one_minus_cdf_min,
            tf.select(cdf_delta > 1e-5, tf.log(tf.maximum(cdf_delta, 1e-12)),
                      log_pdf_mid - np.log(127.5))))
    # Combine per-channel log-probs with the mixture weights.
    log_probs = tf.reduce_sum(log_probs, 3) + \
        log_prob_from_logits(logit_probs)
    if sum_all:
      return -tf.reduce_sum(log_sum_exp(log_probs))
    else:
      return -tf.reduce_sum(log_sum_exp(log_probs), [1, 2])
def mse_loss(pred, labels):
  """RMS-style loss: sqrt(2 * l2_loss(pred - labels)) / batch_size.

  Tries to read a static batch size from `pred.shape`; if that fails (e.g.
  the shape is unknown), falls back to the dynamic shape.

  Args:
    pred: prediction tensor, first dimension is the batch.
    labels: target tensor, broadcast-compatible with `pred`.

  Returns:
    A scalar loss tensor.
  """
  try:
    batch_size = tf.cast(pred.shape[0], tf.float32)
  except Exception as e:
    # BUG FIX: BaseException.message was removed in Python 3; format the
    # exception object itself instead of the nonexistent attribute.
    print('Pred is a tf tensor %s' % str(e))
    batch_size = tf.cast(tf.shape(pred)[0], tf.float32)
  loss_val = tf.sqrt(2 * tf.nn.l2_loss(pred - labels)) / batch_size
  return loss_val
def pullaway_loss(embeddings, name='pullaway_loss'):
  """Pull-away term: mean pairwise cosine similarity of the embeddings,
  excluding each embedding's similarity with itself.
  """
  with tf.name_scope(name):
    l2_norms = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    unit_embeddings = embeddings / l2_norms
    cosine = tf.matmul(unit_embeddings, unit_embeddings, transpose_b=True)
    n = tf.cast(tf.shape(embeddings)[0], tf.float32)
    # Subtract the n diagonal self-similarities (each equals 1), then average
    # over the n * (n - 1) off-diagonal pairs.
    return (tf.reduce_sum(cosine) - n) / (n * (n - 1))
def log_sum_exp(x):
  """Numerically stable log(sum(exp(x))) over the last axis."""
  last_axis = len(x.get_shape()) - 1
  x_max = tf.reduce_max(x, last_axis)
  x_max_keep = tf.reduce_max(x, last_axis, keep_dims=True)
  # Shift by the max so the exponentials cannot overflow.
  return x_max + tf.log(tf.reduce_sum(tf.exp(x - x_max_keep), last_axis))
def log_prob_from_logits(x):
  """Numerically stable log-softmax over the last axis."""
  last_axis = len(x.get_shape()) - 1
  x_max = tf.reduce_max(x, last_axis, keep_dims=True)
  # log softmax(x) = x - max - log(sum(exp(x - max))).
  return x - x_max - tf.log(tf.reduce_sum(tf.exp(x - x_max), last_axis, keep_dims=True))
def segment_loss(logits, labels, num_classes, head=None):
  """Per-pixel softmax cross-entropy for segmentation.

  Args:
    logits: unnormalized per-pixel class scores.
    labels: one-hot targets (cast to float internally).
    num_classes: unused; kept for interface compatibility.
    head: optional per-class weighting applied to the cross-entropy terms.

  Returns:
    Scalar mean cross-entropy over the batch.
  """
  with tf.name_scope('segment_loss'):
    # Small epsilon so tf.log never sees an exact zero.
    epsilon = tf.constant(value=1e-7)
    labels = tf.to_float(labels)
    softmax = tf.nn.softmax(logits) + epsilon
    if head is not None:
      # BUG FIX: tf.mul was removed in TF 1.0; use tf.multiply, matching the
      # rest of this module.
      cross_entropy = -tf.reduce_sum(tf.multiply(labels * tf.log(softmax), head), axis=[1])
    else:
      cross_entropy = -tf.reduce_sum(labels * tf.log(softmax), axis=[1])
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return cross_entropy_mean
def triplet_loss(anchor, positive, negative, alpha=0.2, name='triplet_loss'):
  """FaceNet-style triplet loss: mean(max(d(a, p) - d(a, n) + alpha, 0))."""
  with tf.name_scope(name):
    dist_pos = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
    dist_neg = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
    # Hinge: only triplets violating the margin contribute.
    margin_violation = tf.add(tf.subtract(dist_pos, dist_neg), alpha)
    return tf.reduce_mean(tf.maximum(margin_violation, 0.0), 0)
def decov_loss(xs, name='decov_loss'):
  """DeCov loss: penalizes the off-diagonal entries of the activation
  covariance matrix (0.5 * (||C||_F^2 - ||diag(C)||^2)).
  """
  with tf.name_scope(name):
    flat = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
    mean = tf.reduce_mean(flat, 0, True)
    centered = tf.expand_dims(flat - mean, 2)
    # Batch-mean outer product of the centered activations.
    cov = tf.reduce_mean(tf.matmul(centered, tf.transpose(centered, perm=[0, 2, 1])), 0)
    frob_sqr = tf.reduce_sum(tf.square(cov))
    diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(cov)))
    return 0.5 * (frob_sqr - diag_sqr)
def center_loss(features, label, alpha, num_classes, name='center_loss'):
  """Center loss: pulls features towards per-class centers and updates the
  centers in place.

  Args:
    features: [batch, num_features] embedding tensor.
    label: integer class ids, reshaped to [batch].
    alpha: center momentum; centers move towards the batch by (1 - alpha).
    num_classes: number of rows in the center table.
    name: variable scope name.

  Returns:
    (loss, centers): the l2 loss against the pre-update centers, and the
    post-update centers op.
  """
  with tf.variable_scope(name):
    num_features = features.get_shape()[1]
    # Non-trainable table of per-class centers; updated manually below
    # rather than by the optimizer.
    centers = tf.get_variable(
        'centers', [num_classes, num_features],
        dtype=tf.float32,
        initializer=tf.constant_initializer(0),
        trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    diff = (1 - alpha) * (centers_batch - features)
    # Side effect: scatter-subtract moves each referenced center towards the
    # batch features.
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.nn.l2_loss(features - centers_batch)
    return loss, centers
def correlation_loss(source_samples, target_samples, weight, name='corr_loss'):
  """CORAL-style loss: squared difference between the correlation matrices
  of the (centered, l2-normalized) source and target batches, times `weight`.
  """
  with tf.name_scope(name):
    # Center and normalize each batch before forming the Gram matrices.
    source_samples -= tf.reduce_mean(source_samples, 0)
    target_samples -= tf.reduce_mean(target_samples, 0)
    source_samples = tf.nn.l2_normalize(source_samples, 1)
    target_samples = tf.nn.l2_normalize(target_samples, 1)
    source_cov = tf.matmul(tf.transpose(source_samples), source_samples)
    target_cov = tf.matmul(tf.transpose(target_samples), target_samples)
    corr_loss = tf.reduce_mean(tf.square(source_cov - target_cov)) * weight
    # Assert the loss is finite so NaN/Inf cannot propagate silently.
    assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss])
    with tf.control_dependencies([assert_op]):
      tag = 'Correlation Loss'
      # NOTE(review): only `barrier` depends on the assert; `corr_loss` was
      # created outside this block, so the returned tensor does not force the
      # assert to run -- verify this is intended.
      barrier = tf.no_op(tag)
    return corr_loss
def maximum_mean_discrepancy(x,
                             y,
                             kernel=util.gaussian_kernel_matrix,
                             name='maximum_mean_discrepancy'):
  """MMD^2 between sample sets `x` and `y` under `kernel`:
  E[k(x, x)] + E[k(y, y)] - 2 E[k(x, y)].
  """
  with tf.name_scope(name):
    cost = tf.reduce_mean(kernel(x, x))
    cost += tf.reduce_mean(kernel(y, y))
    cost -= 2 * tf.reduce_mean(kernel(x, y))
    # Clamp tiny negative values caused by floating-point error.
    cost = tf.where(cost > 0, cost, 0, name='value')
    return cost
def mmd_loss(source_samples, target_samples, weight, name='mmd_loss'):
  """Weighted multi-bandwidth Gaussian-kernel MMD between two sample sets."""
  with tf.name_scope(name):
    # Wide range of bandwidths so at least one scale matches the data.
    sigmas = [
        1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5, 1e6
    ]
    gaussian_kernel = partial(util.gaussian_kernel_matrix, sigmas=tf.constant(sigmas))
    loss_value = maximum_mean_discrepancy(source_samples, target_samples, kernel=gaussian_kernel)
    # Floor the MMD at 1e-4 before applying the weight.
    loss_value = tf.maximum(1e-4, loss_value) * weight
    # Assert the loss is finite; the return inside the block ties the assert
    # to graph execution order.
    assert_op = tf.Assert(tf.is_finite(loss_value), [loss_value])
    with tf.control_dependencies([assert_op]):
      tag = 'MMD_Loss'
      barrier = tf.no_op(tag)
      return loss_value
def dann_loss(source_samples, target_samples, weight, name='dann_loss'):
  """Domain-adversarial (DANN) loss: a small domain classifier, fed through
  a gradient-reversal layer, tries to tell source samples from target
  samples; its log loss (weighted by `weight`) is returned.

  NOTE(review): `flatten`, `gradient_reverse`, `fc` and `relu` are helpers
  defined elsewhere in this module -- their exact contracts are assumed here.
  """
  with tf.variable_scope(name):
    batch_size = tf.shape(source_samples)[0]
    # Stack source then target; the mask below labels them 0 and 1.
    samples = tf.concat(values=[source_samples, target_samples], axis=0)
    samples = flatten(samples)
    domain_selection_mask = tf.concat(
        values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))], axis=0)
    # Gradient reversal: forward identity, backward sign-flipped gradient.
    grl = gradient_reverse(samples)
    grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))
    grl = fc(grl, 100, True, None, activation=relu, name='fc1')
    logits = fc(grl, 1, True, None, activation=None, name='fc2')
    domain_predictions = tf.sigmoid(logits)
    domain_loss = tf.losses.log_loss(domain_selection_mask, domain_predictions, weights=weight)
    # Computed but not returned; presumably for summaries -- TODO confirm.
    domain_accuracy = util.accuracy_tf(domain_selection_mask, tf.round(domain_predictions))
    assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
    with tf.control_dependencies([assert_op]):
      tag_loss = 'losses/domain_loss'
      barrier = tf.no_op(tag_loss)
      return domain_loss
def difference_loss(private_samples, shared_samples, weight=1.0, name='difference_loss'):
  """Orthogonality ("difference") loss between private and shared
  representations: mean squared entry of their cross-correlation matrix.
  """
  with tf.name_scope(name):
    # Center and normalize both sets before correlating them.
    private_samples -= tf.reduce_mean(private_samples, 0)
    shared_samples -= tf.reduce_mean(shared_samples, 0)
    private_samples = tf.nn.l2_normalize(private_samples, 1)
    shared_samples = tf.nn.l2_normalize(shared_samples, 1)
    correlation_matrix = tf.matmul(private_samples, shared_samples, transpose_a=True)
    cost = tf.reduce_mean(tf.square(correlation_matrix)) * weight
    # Clamp tiny negatives from floating-point error.
    cost = tf.where(cost > 0, cost, 0, name='value')
    # Assert finiteness; only `barrier` depends on the assert (see
    # correlation_loss for the same pattern).
    assert_op = tf.Assert(tf.is_finite(cost), [cost])
    with tf.control_dependencies([assert_op]):
      barrier = tf.no_op(name)
    return cost
def log_quaternion_loss_batch(predictions, labels, name='log_quaternion_batch_loss'):
  """Per-example log quaternion distance: log(1e-4 + 1 - |<q_pred, q_lab>|).

  Both inputs must hold unit quaternions; this is enforced with tf.Assert.
  """
  unit_norm_checks = [
      tf.Assert(
          tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1), 1e-4)),
          ['The l2 norm of each prediction quaternion vector should be 1.']),
      tf.Assert(
          tf.reduce_all(tf.less(tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)),
          ['The l2 norm of each label quaternion vector should be 1.']),
  ]
  with tf.name_scope(name):
    with tf.control_dependencies(unit_norm_checks):
      dot_products = tf.reduce_sum(tf.multiply(predictions, labels), [1])
      # 1e-4 keeps the log argument strictly positive.
      return tf.log(1e-4 + 1 - tf.abs(dot_products))
def log_quaternion_loss(predictions, labels, batch_size, name='log_quaternion_loss'):
  """Mean log quaternion distance over a batch of size `batch_size`."""
  with tf.name_scope(name):
    per_example = log_quaternion_loss_batch(predictions, labels)
    total = tf.reduce_sum(per_example, [0])
    return tf.multiply(total, 1.0 / batch_size, name='log_quaternion_loss')
def random_perturbation_loss(embedded, length, loss_fn, perturb_norm_length=0.1):
  """Loss after adding random, length-masked, L2-scaled noise to `embedded`."""
  gaussian_noise = tf.random_normal(shape=tf.shape(embedded))
  scaled_noise = _scale_l2(_mask_by_length(gaussian_noise, length), perturb_norm_length)
  return loss_fn(embedded + scaled_noise)
def adversarial_loss(embedded, loss, loss_fn, perturb_norm_length=0.1):
  """Adversarial training loss: perturb embeddings along the loss gradient."""
  loss_grad, = tf.gradients(
      loss, embedded, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
  # Treat the gradient as a constant so the perturbation direction itself is
  # not differentiated through.
  loss_grad = tf.stop_gradient(loss_grad)
  return loss_fn(embedded + _scale_l2(loss_grad, perturb_norm_length))
def virtual_adversarial_loss(logits,
                             embedded,
                             labels,
                             length,
                             logits_from_embedding_fn,
                             num_classes,
                             num_power_iteration=1,
                             small_constant_for_finite_diff=1e-3,
                             perturb_norm_length=0.1,
                             vocab_size=None):
  """Virtual adversarial loss (VAT): KL between model outputs before and
  after a worst-case perturbation found by power iteration.

  BUG FIX: the body referenced an undefined `vocab_size`; it is now an
  explicit keyword argument, mirroring virtual_adversarial_loss_brnn below
  (the function previously raised NameError on every call, so requiring the
  new argument cannot break working callers).

  Args:
    logits: model logits on the clean input (treated as constants).
    labels: token ids used to build the end-of-sequence weight mask.
    length: per-example sequence lengths.
    logits_from_embedding_fn: maps perturbed embeddings to logits.
    num_classes: number of output classes (2 selects the sigmoid KL path).
    num_power_iteration: iterations to refine the adversarial direction.
    small_constant_for_finite_diff: finite-difference step size.
    perturb_norm_length: L2 norm of the final perturbation.
    vocab_size: vocabulary size; the EOS id is assumed to be vocab_size - 1.

  Raises:
    ValueError: if `vocab_size` is not provided.
  """
  if vocab_size is None:
    raise ValueError('vocab_size must be provided to build the end-of-sequence mask')
  # Stop gradient so the clean logits act as fixed targets.
  logits = tf.stop_gradient(logits)
  weights = _end_of_seq_mask(labels, vocab_size)
  d = _mask_by_length(tf.random_normal(shape=tf.shape(embedded)), length)
  for _ in range(num_power_iteration):
    d = _scale_l2(d, small_constant_for_finite_diff)
    d_logits = logits_from_embedding_fn(embedded + d)
    kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes)
    d, = tf.gradients(kl, d, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
    d = tf.stop_gradient(d)
  perturb = _scale_l2(_mask_by_length(d, length), perturb_norm_length)
  vadv_logits = logits_from_embedding_fn(embedded + perturb)
  return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)
def random_perturbation_loss_brnn(embedded, length, loss_fn, perturb_norm_length=0.1):
  """Bidirectional variant of random_perturbation_loss: independent noise is
  added to each embedding tensor in the `embedded` sequence.
  """
  perturbed = []
  for emb in embedded:
    gaussian = tf.random_normal(shape=tf.shape(emb))
    perturbed.append(emb + _scale_l2(_mask_by_length(gaussian, length), perturb_norm_length))
  return loss_fn(perturbed)
def adversarial_loss_brnn(embedded, loss, loss_fn, perturb_norm_length=0.1):
  """Bidirectional variant of adversarial_loss: one gradient-based
  perturbation per embedding tensor.

  BUG FIX: the keyword argument was misspelled `perurb_norm_length`, so the
  body's reference to `perturb_norm_length` raised NameError on every call;
  the parameter is now spelled to match the body and the sibling functions.
  """
  grads = tf.gradients(
      loss, embedded, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
  # Gradients are treated as constants (stop_gradient) before scaling.
  adv_exs = [
      emb + _scale_l2(tf.stop_gradient(g), perturb_norm_length) for emb, g in zip(embedded, grads)
  ]
  return loss_fn(adv_exs)
def virtual_adversarial_loss_brnn(logits,
                                  embedded,
                                  labels,
                                  length,
                                  logits_from_embedding_fn,
                                  vocab_size,
                                  num_classes,
                                  num_power_iteration=1,
                                  small_constant_for_finite_diff=1e-3,
                                  perturb_norm_length=0.1):
  """Virtual adversarial loss for a bidirectional model: like
  virtual_adversarial_loss, but `embedded` is a sequence of embedding
  tensors (one per direction) perturbed jointly.

  Args:
    logits: clean-model logits, treated as fixed targets.
    embedded: sequence of embedding tensors.
    labels: token ids used for the end-of-sequence weight mask.
    length: per-example sequence lengths.
    logits_from_embedding_fn: maps a list of perturbed embeddings to logits.
    vocab_size: vocabulary size; EOS id is vocab_size - 1.
    num_classes: number of output classes.
    num_power_iteration: power-iteration steps for the adversarial direction.
    small_constant_for_finite_diff: finite-difference step size.
    perturb_norm_length: L2 norm of the final perturbations.
  """
  logits = tf.stop_gradient(logits)
  weights = _end_of_seq_mask(labels, vocab_size)
  # Start from random length-masked directions, one per embedding tensor.
  perturbs = [_mask_by_length(tf.random_normal(shape=tf.shape(emb)), length) for emb in embedded]
  for _ in range(num_power_iteration):
    perturbs = [_scale_l2(d, small_constant_for_finite_diff) for d in perturbs]
    d_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)])
    kl = _kl_divergence_with_logits(logits, d_logits, weights, num_classes)
    # Refine all directions jointly via the KL gradient.
    perturbs = tf.gradients(
        kl, perturbs, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
    perturbs = [tf.stop_gradient(d) for d in perturbs]
  perturbs = [_scale_l2(_mask_by_length(d, length), perturb_norm_length) for d in perturbs]
  vadv_logits = logits_from_embedding_fn([emb + d for (emb, d) in zip(embedded, perturbs)])
  return _kl_divergence_with_logits(logits, vadv_logits, weights, num_classes)
def _mask_by_length(t, length):
  """Zeros out timesteps of `t` beyond each example's sequence `length`."""
  max_timesteps = t.get_shape().as_list()[1]
  seq_mask = tf.sequence_mask(length, maxlen=max_timesteps)
  # Add a trailing embedding axis so the mask broadcasts over features.
  seq_mask = tf.expand_dims(tf.cast(seq_mask, tf.float32), -1)
  return t * seq_mask
def _scale_l2(x, norm_length):
  """Rescales `x` so its L2 norm over axes (1, 2) equals `norm_length`."""
  # Divide by the per-example max first so squaring cannot overflow.
  alpha = tf.reduce_max(tf.abs(x), (1, 2), keep_dims=True) + 1e-12
  l2_norm = alpha * tf.sqrt(tf.reduce_sum(tf.pow(x / alpha, 2), (1, 2), keep_dims=True) + 1e-6)
  return norm_length * (x / l2_norm)
def _end_of_seq_mask(tokens, vocab_size):
  """Float mask that is 1.0 wherever `tokens` equals the end-of-sequence id.

  By convention the EOS token is the last id in the vocabulary.
  """
  end_id = vocab_size - 1
  return tf.cast(tf.equal(tokens, end_id), tf.float32)
def _kl_divergence_with_logits(q_logits, p_logits, weights, num_classes):
  """Weighted KL(q || p) computed from logits.

  For `num_classes == 2` the binary (sigmoid) formulation is used; otherwise
  the categorical (softmax) formulation.

  Args:
    q_logits: logits of the reference distribution q (rank 2).
    p_logits: logits of the compared distribution p (rank 2).
    weights: rank-1 per-position weights; their sum normalizes the loss.
    num_classes: number of classes; 2 selects the sigmoid path.

  Returns:
    Scalar tensor named 'kl': sum(weights * kl) / sum(weights).
  """
  if num_classes == 2:
    q = tf.nn.sigmoid(q_logits)
    p = tf.nn.sigmoid(p_logits)
    # Binary KL(q||p) expressed via cross-entropies: H(q, p) - H(q, q).
    # BUG FIX: the second term referenced `f.nn....`, an undefined name; it
    # must be tf.nn.sigmoid_cross_entropy_with_logits.
    kl = (-tf.nn.sigmoid_cross_entropy_with_logits(logits=q_logits, labels=q) +
          tf.nn.sigmoid_cross_entropy_with_logits(logits=p_logits, labels=q))
  else:
    q = tf.nn.softmax(q_logits)
    p = tf.nn.softmax(p_logits)
    kl = tf.reduce_sum(q * (tf.log(q) - tf.log(p)), 1)
  num_labels = tf.reduce_sum(weights)
  # Avoid division by zero when no position is weighted.
  num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)
  kl.get_shape().assert_has_rank(2)
  weights.get_shape().assert_has_rank(1)
  loss = tf.identity(tf.reduce_sum(tf.expand_dims(weights, -1) * kl) / num_labels, name='kl')
  return loss
def cross_entropy_sequence_loss(logits, targets, sequence_length):
  """Per-timestep sparse cross-entropy, zeroed beyond each sequence length.

  Inputs are time-major; the mask is built batch-major and transposed.
  """
  with tf.name_scope("cross_entropy_sequence_loss"):
    token_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets)
    mask = tf.sequence_mask(tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
    return token_losses * tf.transpose(tf.to_float(mask), [1, 0])
def dice_loss(predictions, targets, weights=1., name='dice_loss'):
  """Negative smoothed Dice coefficient between predictions and targets.

  `weights` doubles as the smoothing constant added to numerator and
  denominator.
  """
  with tf.name_scope(name):
    targets = tf.to_float(targets)
    numerator = 2 * tf.reduce_sum(predictions * targets) + weights
    denominator = weights + tf.reduce_sum(predictions) + tf.reduce_sum(targets)
    return -(numerator / denominator)
def precision_recall_auc_loss(labels,
                              logits,
                              precision_range=(0.0, 1.0),
                              num_anchors=20,
                              weights=1.0,
                              dual_rate_factor=0.1,
                              label_priors=None,
                              surrogate_type='xent',
                              lambdas_initializer=tf.constant_initializer(1.0),
                              reuse=None,
                              variables_collections=None,
                              trainable=True,
                              scope=None):
  """Surrogate loss for optimizing the area under the precision-recall
  curve, approximated as a Riemann sum of recall-at-precision losses over
  `num_anchors` precision anchor points (global-objectives style).

  Returns:
    (scaled_loss, other_outputs): the per-example loss reshaped to the
    original label shape, and a dict exposing the dual variables, biases,
    label priors and TP/FP bounds.
  """
  with tf.variable_scope(scope, 'precision_recall_auc', [labels, logits, label_priors], reuse=reuse):
    labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
    num_labels = losses_utils.get_num_labels(logits)
    dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',
                                                     logits.dtype)
    # Anchor precisions inside precision_range, spaced by delta.
    precision_values, delta = _range_to_anchors_and_delta(precision_range, num_anchors, logits.dtype)
    # One Lagrange multiplier per (label, anchor), updated by gradient
    # ascent via the dual-variable trick in _create_dual_variable.
    lambdas, lambdas_variable = _create_dual_variable(
        'lambdas',
        shape=[1, num_labels, num_anchors],
        dtype=logits.dtype,
        initializer=lambdas_initializer,
        collections=variables_collections,
        trainable=trainable,
        dual_rate_factor=dual_rate_factor)
    # Per-anchor bias shifts the decision threshold for each anchor.
    biases = tf.contrib.framework.model_variable(
        name='biases',
        shape=[1, num_labels, num_anchors],
        dtype=logits.dtype,
        initializer=tf.zeros_initializer(),
        collections=variables_collections,
        trainable=trainable)
    label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)
    label_priors = tf.reshape(label_priors, [1, num_labels, 1])
    # Broadcast everything over the anchor dimension.
    logits = tf.expand_dims(logits, 2)
    labels = tf.expand_dims(labels, 2)
    weights = tf.expand_dims(weights, 2)
    loss = weights * losses_utils.weighted_surrogate_loss(
        labels,
        logits + biases,
        surrogate_type=surrogate_type,
        positive_weights=1.0 + lambdas * (1.0 - precision_values),
        negative_weights=lambdas * precision_values)
    # The xent surrogate is scaled by log(2) so it upper-bounds the 0-1 loss.
    maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
    maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
    lambda_term = lambdas * (1.0 - precision_values) * label_priors * maybe_log2
    per_anchor_loss = loss - lambda_term
    # Riemann sum over anchors, normalized to the anchored range width.
    per_label_loss = delta * tf.reduce_sum(per_anchor_loss, 2)
    scaled_loss = tf.div(
        per_label_loss, precision_range[1] - precision_range[0] - delta, name='AUC_Normalize')
    scaled_loss = tf.reshape(scaled_loss, original_shape)
    other_outputs = {
        'lambdas':
            lambdas_variable,
        'biases':
            biases,
        'label_priors':
            label_priors,
        'true_positives_lower_bound':
            true_positives_lower_bound(labels, logits, weights, surrogate_type),
        'false_positives_upper_bound':
            false_positives_upper_bound(labels, logits, weights, surrogate_type)
    }
    return scaled_loss, other_outputs
def roc_auc_loss(labels, logits, weights=1.0, surrogate_type='xent', scope=None):
  """Pairwise surrogate for 1 - ROC AUC: penalizes pairs where a negative
  example outscores a positive one.

  Returns:
    (loss, {}): a per-example loss reshaped to the original label shape, and
    an empty dict for interface parity with the other *_loss functions here.
  """
  with tf.name_scope(scope, 'roc_auc', [labels, logits, weights]):
    labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
    # All pairwise differences via broadcasting of [1, N] against [N, 1].
    logits_difference = tf.expand_dims(logits, 0) - tf.expand_dims(logits, 1)
    labels_difference = tf.expand_dims(labels, 0) - tf.expand_dims(labels, 1)
    weights_product = tf.expand_dims(weights, 0) * tf.expand_dims(weights, 1)
    # Positive when the pair is correctly ordered.
    signed_logits_difference = labels_difference * logits_difference
    raw_loss = losses_utils.weighted_surrogate_loss(
        labels=tf.ones_like(signed_logits_difference),
        logits=signed_logits_difference,
        surrogate_type=surrogate_type)
    weighted_loss = weights_product * raw_loss
    # |labels_difference| keeps only positive/negative pairs; 0.5 corrects
    # for each pair appearing twice in the symmetric difference tensors.
    loss = tf.reduce_mean(tf.abs(labels_difference) * weighted_loss, 0) * 0.5
    loss = tf.reshape(loss, original_shape)
    return loss, {}
def recall_at_precision_loss(labels,
                             logits,
                             target_precision,
                             weights=1.0,
                             dual_rate_factor=0.1,
                             label_priors=None,
                             surrogate_type='xent',
                             lambdas_initializer=tf.constant_initializer(1.0),
                             reuse=None,
                             variables_collections=None,
                             trainable=True,
                             scope=None):
  """Lagrangian surrogate for maximizing recall subject to
  precision >= `target_precision` (global-objectives style).

  Returns:
    (loss, other_outputs): per-example loss in the original label shape, and
    a dict with the dual variable, label priors and TP/FP bounds.
  """
  with tf.variable_scope(scope, 'recall_at_precision', [logits, labels, label_priors], reuse=reuse):
    labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
    num_labels = losses_utils.get_num_labels(logits)
    target_precision = losses_utils.convert_and_cast(target_precision, 'target_precision',
                                                     logits.dtype)
    dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',
                                                     logits.dtype)
    # One Lagrange multiplier per label, updated by dual gradient ascent.
    lambdas, lambdas_variable = _create_dual_variable(
        'lambdas',
        shape=[num_labels],
        dtype=logits.dtype,
        initializer=lambdas_initializer,
        collections=variables_collections,
        trainable=trainable,
        dual_rate_factor=dual_rate_factor)
    label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)
    weighted_loss = weights * losses_utils.weighted_surrogate_loss(
        labels,
        logits,
        surrogate_type=surrogate_type,
        positive_weights=1.0 + lambdas * (1.0 - target_precision),
        negative_weights=lambdas * target_precision)
    # xent surrogate is scaled by log(2) to bound the 0-1 loss.
    maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
    maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
    lambda_term = lambdas * (1.0 - target_precision) * label_priors * maybe_log2
    loss = tf.reshape(weighted_loss - lambda_term, original_shape)
    other_outputs = {
        'lambdas':
            lambdas_variable,
        'label_priors':
            label_priors,
        'true_positives_lower_bound':
            true_positives_lower_bound(labels, logits, weights, surrogate_type),
        'false_positives_upper_bound':
            false_positives_upper_bound(labels, logits, weights, surrogate_type)
    }
    return loss, other_outputs
def precision_at_recall_loss(labels,
                             logits,
                             target_recall,
                             weights=1.0,
                             dual_rate_factor=0.1,
                             label_priors=None,
                             surrogate_type='xent',
                             lambdas_initializer=tf.constant_initializer(1.0),
                             reuse=None,
                             variables_collections=None,
                             trainable=True,
                             scope=None):
  """Lagrangian surrogate for maximizing precision subject to
  recall >= `target_recall` (global-objectives style).

  Returns:
    (loss, other_outputs): per-example loss in the original label shape, and
    a dict with the dual variable, label priors and TP/FP bounds.
  """
  with tf.variable_scope(scope, 'precision_at_recall', [logits, labels, label_priors], reuse=reuse):
    labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
    num_labels = losses_utils.get_num_labels(logits)
    target_recall = losses_utils.convert_and_cast(target_recall, 'target_recall', logits.dtype)
    dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',
                                                     logits.dtype)
    # One Lagrange multiplier per label, updated by dual gradient ascent.
    lambdas, lambdas_variable = _create_dual_variable(
        'lambdas',
        shape=[num_labels],
        dtype=logits.dtype,
        initializer=lambdas_initializer,
        collections=variables_collections,
        trainable=trainable,
        dual_rate_factor=dual_rate_factor)
    label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)
    # Positives are up-weighted by lambda; negatives keep unit weight.
    weighted_loss = weights * losses_utils.weighted_surrogate_loss(
        labels, logits, surrogate_type, positive_weights=lambdas, negative_weights=1.0)
    # xent surrogate is scaled by log(2) to bound the 0-1 loss.
    maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
    maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
    lambda_term = lambdas * label_priors * (target_recall - 1.0) * maybe_log2
    loss = tf.reshape(weighted_loss + lambda_term, original_shape)
    other_outputs = {
        'lambdas':
            lambdas_variable,
        'label_priors':
            label_priors,
        'true_positives_lower_bound':
            true_positives_lower_bound(labels, logits, weights, surrogate_type),
        'false_positives_upper_bound':
            false_positives_upper_bound(labels, logits, weights, surrogate_type)
    }
    return loss, other_outputs
def false_positive_rate_at_true_positive_rate_loss(labels,
                                                   logits,
                                                   target_rate,
                                                   weights=1.0,
                                                   dual_rate_factor=0.1,
                                                   label_priors=None,
                                                   surrogate_type='xent',
                                                   lambdas_initializer=tf.constant_initializer(1.0),
                                                   reuse=None,
                                                   variables_collections=None,
                                                   trainable=True,
                                                   scope=None):
  """Minimizes the false positive rate subject to
  true positive rate >= `target_rate`.

  Delegates to precision_at_recall_loss with target_recall = target_rate;
  NOTE(review): this relies on the two formulations coinciding (TPR equals
  recall) -- confirm the FPR objective is intended to share the same
  surrogate.
  """
  return precision_at_recall_loss(
      labels=labels,
      logits=logits,
      target_recall=target_rate,
      weights=weights,
      dual_rate_factor=dual_rate_factor,
      label_priors=label_priors,
      surrogate_type=surrogate_type,
      lambdas_initializer=lambdas_initializer,
      reuse=reuse,
      variables_collections=variables_collections,
      trainable=trainable,
      scope=scope)
def true_positive_rate_at_false_positive_rate_loss(labels,
                                                   logits,
                                                   target_rate,
                                                   weights=1.0,
                                                   dual_rate_factor=0.1,
                                                   label_priors=None,
                                                   surrogate_type='xent',
                                                   lambdas_initializer=tf.constant_initializer(1.0),
                                                   reuse=None,
                                                   variables_collections=None,
                                                   trainable=True,
                                                   scope=None):
  """Lagrangian surrogate for maximizing the true positive rate subject to
  false positive rate <= `target_rate` (global-objectives style).

  Returns:
    (loss, other_outputs): per-example loss in the original label shape, and
    a dict with the dual variable, label priors and TP/FP bounds.
  """
  with tf.variable_scope(scope, 'tpr_at_fpr', [labels, logits, label_priors], reuse=reuse):
    labels, logits, weights, original_shape = _prepare_labels_logits_weights(labels, logits, weights)
    num_labels = losses_utils.get_num_labels(logits)
    target_rate = losses_utils.convert_and_cast(target_rate, 'target_rate', logits.dtype)
    dual_rate_factor = losses_utils.convert_and_cast(dual_rate_factor, 'dual_rate_factor',
                                                     logits.dtype)
    # One Lagrange multiplier per label, updated by dual gradient ascent.
    lambdas, lambdas_variable = _create_dual_variable(
        'lambdas',
        shape=[num_labels],
        dtype=logits.dtype,
        initializer=lambdas_initializer,
        collections=variables_collections,
        trainable=trainable,
        dual_rate_factor=dual_rate_factor)
    label_priors = maybe_create_label_priors(label_priors, labels, weights, variables_collections)
    # Negatives are weighted by lambda; positives keep unit weight.
    weighted_loss = weights * losses_utils.weighted_surrogate_loss(
        labels,
        logits,
        surrogate_type=surrogate_type,
        positive_weights=1.0,
        negative_weights=lambdas)
    # xent surrogate is scaled by log(2) to bound the 0-1 loss.
    maybe_log2 = tf.log(2.0) if surrogate_type == 'xent' else 1.0
    maybe_log2 = tf.cast(maybe_log2, logits.dtype.base_dtype)
    lambda_term = lambdas * target_rate * (1.0 - label_priors) * maybe_log2
    loss = tf.reshape(weighted_loss - lambda_term, original_shape)
    other_outputs = {
        'lambdas':
            lambdas_variable,
        'label_priors':
            label_priors,
        'true_positives_lower_bound':
            true_positives_lower_bound(labels, logits, weights, surrogate_type),
        'false_positives_upper_bound':
            false_positives_upper_bound(labels, logits, weights, surrogate_type)
    }
    return loss, other_outputs
def _prepare_labels_logits_weights(labels, logits, weights):
  """Validates and canonicalizes inputs to the global-objectives losses.

  Casts labels/weights to the logits dtype, checks shape compatibility, and
  reshapes everything to rank 2 ([batch, num_labels]).

  Returns:
    (labels, logits, weights, original_shape): rank-2 tensors plus the
    original labels shape (with a -1 batch dim) for reshaping results back.

  Raises:
    ValueError: if logits and labels have incompatible shapes.
  """
  logits = tf.convert_to_tensor(logits, name='logits')
  labels = losses_utils.convert_and_cast(labels, 'labels', logits.dtype.base_dtype)
  weights = losses_utils.convert_and_cast(weights, 'weights', logits.dtype.base_dtype)
  try:
    labels.get_shape().merge_with(logits.get_shape())
  except ValueError:
    raise ValueError('logits and labels must have the same shape (%s vs %s)' % (logits.get_shape(),
                                                                                labels.get_shape()))
  original_shape = labels.get_shape().as_list()
  if labels.get_shape().ndims > 0:
    # Allow any batch size when restoring the original shape later.
    original_shape[0] = -1
  if labels.get_shape().ndims <= 1:
    # Promote scalars/vectors to a single-label column.
    labels = tf.reshape(labels, [-1, 1])
    logits = tf.reshape(logits, [-1, 1])
  if weights.get_shape().ndims == 1:
    # Per-example weights: add a trailing label dimension.
    weights = tf.reshape(weights, [-1, 1])
  if weights.get_shape().ndims == 0:
    # Scalar weight: broadcast to the logits shape.
    weights *= tf.ones_like(logits)
  return labels, logits, weights, original_shape
def _range_to_anchors_and_delta(precision_range, num_anchors, dtype):
if not 0 <= precision_range[0] <= precision_range[-1] <= 1:
raise ValueError(
'precision values must obey 0 <= %f <= %f <= 1' % (precision_range[0], precision_range[-1]))
if not 0 < len(precision_range) < 3:
raise ValueError('length of precision_range (%d) must be 1 or 2' % len(precision_range))
values = np.linspace(start=precision_range[0], stop=precision_range[1], num=num_anchors + 2)[1:-1]
precision_values = losses_utils.convert_and_cast(values, 'precision_values', dtype)
delta = losses_utils.convert_and_cast(values[0] - precision_range[0], 'delta', dtype)
precision_values = losses_utils.expand_outer(precision_values, 3)
return precision_values, delta
def _create_dual_variable(name, shape, dtype, initializer, collections, trainable, dual_rate_factor):
  """Creates an unpartitioned variable holding a Lagrange multiplier.

  Returns:
    (dual_value, dual_variable): the nonnegative value used in the loss, and
    the underlying variable.
  """
  # Temporarily disable any partitioner: dual variables must be whole.
  partitioner = tf.get_variable_scope().partitioner
  try:
    tf.get_variable_scope().set_partitioner(None)
    dual_variable = tf.contrib.framework.model_variable(
        name=name,
        shape=shape,
        dtype=dtype,
        initializer=initializer,
        collections=collections,
        trainable=trainable)
  finally:
    tf.get_variable_scope().set_partitioner(partitioner)
  # Enforce nonnegativity of the multiplier.
  dual_value = tf.abs(dual_variable)
  if trainable:
    # Gradient trick: (1 + r) * stop_gradient(v) - r * v has forward value v
    # but gradient -r * dv, so gradient *descent* on the loss performs
    # gradient *ascent* on the dual variable, scaled by dual_rate_factor.
    dual_value = (tf.stop_gradient(
        (1.0 + dual_rate_factor) * dual_value) - dual_rate_factor * dual_value)
  return dual_value, dual_variable
def maybe_create_label_priors(label_priors, labels, weights, variables_collections):
  """Returns `label_priors` as a tensor, or builds a running estimate.

  When no priors are supplied, losses_utils maintains an estimate from the
  observed labels and weights.
  """
  if label_priors is None:
    return losses_utils.build_label_priors(
        labels, weights, variables_collections=variables_collections)
  label_priors = losses_utils.convert_and_cast(
      label_priors, name='label_priors', dtype=labels.dtype.base_dtype)
  return tf.squeeze(label_priors)
def true_positives_lower_bound(labels, logits, weights, surrogate_type):
  """Lower bound on weighted true positives implied by the surrogate loss."""
  # The xent surrogate is divided by log(2) so it bounds the 0-1 loss.
  xent_scale = tf.log(2.0) if surrogate_type == 'xent' else 1.0
  xent_scale = tf.cast(xent_scale, logits.dtype.base_dtype)
  if logits.get_shape().ndims == 3 and labels.get_shape().ndims < 3:
    # Broadcast labels over the extra (anchor) dimension of the logits.
    labels = tf.expand_dims(labels, 2)
  positives_loss = losses_utils.weighted_surrogate_loss(
      labels, logits, surrogate_type, negative_weights=0.0) / xent_scale
  return tf.reduce_sum(weights * (labels - positives_loss), 0)
def false_positives_upper_bound(labels, logits, weights, surrogate_type):
  """Upper bound on weighted false positives implied by the surrogate loss."""
  # The xent surrogate is divided by log(2) so it bounds the 0-1 loss.
  xent_scale = tf.log(2.0) if surrogate_type == 'xent' else 1.0
  xent_scale = tf.cast(xent_scale, logits.dtype.base_dtype)
  negatives_loss = losses_utils.weighted_surrogate_loss(
      labels, logits, surrogate_type, positive_weights=0.0) / xent_scale
  return tf.reduce_sum(weights * negatives_loss, 0)
| true | true |
f72880d18cdcfd38e6c5129da48d768736469e27 | 533 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractWaterBlog.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractWaterBlog.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractWaterBlog.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z |
def extractWaterBlog(item):
	"""Feed parser for the 'water.blog' source.

	Returns a release message for recognized tags, None for previews or
	items without a volume/chapter, and False when no tag matches.
	"""
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
	if not (chp or vol) or "preview" in item['title'].lower():
		return None

	release_types = [
		('PRC', 'PRC', 'translated'),
		('Loiterous', 'Loiterous', 'oel'),
	]
	for tagname, name, tl_type in release_types:
		if tagname in item['tags']:
			return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)

	return False
def extractWaterBlog(item):
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| true | true |
f7288141fdc5e6616cfe73fbefd0235cfc5b8e9c | 26,921 | py | Python | beets/ui/__init__.py | daveisadork/beets | 3d94900d7e328bcdf70575b7054438cc9cd1bba4 | [
"MIT"
] | null | null | null | beets/ui/__init__.py | daveisadork/beets | 3d94900d7e328bcdf70575b7054438cc9cd1bba4 | [
"MIT"
] | null | null | null | beets/ui/__init__.py | daveisadork/beets | 3d94900d7e328bcdf70575b7054438cc9cd1bba4 | [
"MIT"
] | null | null | null | # This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module contains all of the core logic for beets' command-line
interface. To invoke the CLI, just call beets.ui.main(). The actual
CLI commands are implemented in the ui.commands module.
"""
from __future__ import print_function
import locale
import optparse
import textwrap
import sys
from difflib import SequenceMatcher
import logging
import sqlite3
import errno
import re
import struct
import traceback
from beets import library
from beets import plugins
from beets import util
from beets.util.functemplate import Template
from beets import config
from beets.util import confit
from beets.autotag import mb
# On Windows platforms, use colorama to support "ANSI" terminal colors.
if sys.platform == 'win32':
try:
import colorama
except ImportError:
pass
else:
colorama.init()
# Constants.
PF_KEY_QUERIES = {
'comp': 'comp:true',
'singleton': 'singleton:true',
}
# UI exception. Commands should throw this in order to display
# nonrecoverable errors to the user.
class UserError(Exception):
    """Raised by commands to report a nonrecoverable, user-visible error.

    The top-level CLI driver is expected to catch this and display the
    message instead of a traceback.
    """
    pass
# Main logger.
log = logging.getLogger('beets')
# Utilities.
def _encoding():
    """Tries to guess the encoding used by the terminal.

    An explicit `terminal_encoding` configuration value wins; otherwise the
    locale's encoding is used, falling back to UTF-8.
    """
    override = config['terminal_encoding'].get()
    if override:
        return override

    try:
        return locale.getdefaultlocale()[1] or 'utf8'
    except ValueError:
        # Invalid locale environment variable setting. To avoid failing
        # entirely for no good reason, assume UTF-8.
        return 'utf8'
def decargs(arglist):
    """Given a list of command-line argument bytestrings, attempts to
    decode them to Unicode strings using the terminal's encoding.
    """
    encoding = _encoding()
    return [arg.decode(encoding) for arg in arglist]
def print_(*strings):
    """Like ``print``, but encode the message with the terminal's
    encoding first, silently replacing any character outside that
    encoding instead of raising ``UnicodeEncodeError``.
    """
    if not strings:
        txt = u''
    elif isinstance(strings[0], unicode):
        # Join as Unicode when the first argument is Unicode.
        txt = u' '.join(strings)
    else:
        txt = ' '.join(strings)
    # Unicode text must become bytes before hitting the console.
    if isinstance(txt, unicode):
        txt = txt.encode(_encoding(), 'replace')
    print(txt)
def input_(prompt=None):
    """Like `raw_input`, but decodes the result to a Unicode string.
    Raises a UserError if stdin is not available. The prompt is sent to
    stdout rather than stderr. A space is printed between the prompt
    and the input cursor.
    """
    # raw_input incorrectly sends prompts to stderr, not stdout, so we
    # use print() explicitly to display prompts.
    # http://bugs.python.org/issue1927
    if prompt:
        if isinstance(prompt, unicode):
            prompt = prompt.encode(_encoding(), 'replace')
        print(prompt, end=' ')
    try:
        resp = raw_input()
    except EOFError:
        # stdin was closed (e.g. piped input exhausted).
        raise UserError('stdin stream ended while input required')
    return resp.decode(sys.stdin.encoding or 'utf8', 'ignore')
def input_options(options, require=False, prompt=None, fallback_prompt=None,
                  numrange=None, default=None, max_width=72):
    """Prompts a user for input. The sequence of `options` defines the
    choices the user has. A single-letter shortcut is inferred for each
    option; the user's choice is returned as that single, lower-case
    letter. The options should be provided as lower-case strings unless
    a particular shortcut is desired; in that case, only that letter
    should be capitalized.
    By default, the first option is the default. `default` can be provided to
    override this. If `require` is provided, then there is no default. The
    prompt and fallback prompt are also inferred but can be overridden.
    If numrange is provided, it is a pair of `(low, high)` (both ints)
    indicating that, in addition to `options`, the user may enter an
    integer in that inclusive range.
    `max_width` specifies the maximum number of columns in the
    automatically generated prompt string.
    """
    # Assign single letters to each option. Also capitalize the options
    # to indicate the letter.
    letters = {}
    display_letters = []
    capitalized = []
    first = True
    for option in options:
        # Is a letter already capitalized?
        for letter in option:
            if letter.isalpha() and letter.upper() == letter:
                found_letter = letter
                break
        else:
            # Infer a letter: the first alphabetic character not
            # already claimed by a previous option.
            for letter in option:
                if not letter.isalpha():
                    continue  # Don't use punctuation.
                if letter not in letters:
                    found_letter = letter
                    break
            else:
                raise ValueError('no unambiguous lettering found')
        letters[found_letter.lower()] = option
        index = option.index(found_letter)
        # Mark the option's shortcut letter for display.
        if not require and ((default is None and not numrange and first) or
                            (isinstance(default, basestring) and
                             found_letter.lower() == default.lower())):
            # The first option is the default; mark it.
            show_letter = '[%s]' % found_letter.upper()
            is_default = True
        else:
            show_letter = found_letter.upper()
            is_default = False
        # Colorize the letter shortcut.
        show_letter = colorize('turquoise' if is_default else 'blue',
                               show_letter)
        # Insert the highlighted letter back into the word.
        capitalized.append(
            option[:index] + show_letter + option[index+1:]
        )
        display_letters.append(found_letter.upper())
        first = False
    # The default is just the first option if unspecified.
    if require:
        default = None
    elif default is None:
        if numrange:
            default = numrange[0]
        else:
            default = display_letters[0].lower()
    # Make a prompt if one is not provided.
    if not prompt:
        prompt_parts = []
        prompt_part_lengths = []
        if numrange:
            if isinstance(default, int):
                default_name = str(default)
                default_name = colorize('turquoise', default_name)
                tmpl = '# selection (default %s)'
                prompt_parts.append(tmpl % default_name)
                # Length is computed on the *uncolored* text so ANSI
                # escape codes don't distort line wrapping.
                prompt_part_lengths.append(len(tmpl % str(default)))
            else:
                prompt_parts.append('# selection')
                prompt_part_lengths.append(len(prompt_parts[-1]))
        prompt_parts += capitalized
        prompt_part_lengths += [len(s) for s in options]
        # Wrap the query text.
        prompt = ''
        line_length = 0
        for i, (part, length) in enumerate(zip(prompt_parts,
                                               prompt_part_lengths)):
            # Add punctuation.
            if i == len(prompt_parts) - 1:
                part += '?'
            else:
                part += ','
            length += 1
            # Choose either the current line or the beginning of the next.
            if line_length + length + 1 > max_width:
                prompt += '\n'
                line_length = 0
            if line_length != 0:
                # Not the beginning of the line; need a space.
                part = ' ' + part
                length += 1
            prompt += part
            line_length += length
    # Make a fallback prompt too. This is displayed if the user enters
    # something that is not recognized.
    if not fallback_prompt:
        fallback_prompt = 'Enter one of '
        if numrange:
            fallback_prompt += '%i-%i, ' % numrange
        fallback_prompt += ', '.join(display_letters) + ':'
    resp = input_(prompt)
    while True:
        resp = resp.strip().lower()
        # Try default option.
        if default is not None and not resp:
            resp = default
        # Try an integer input if available.
        if numrange:
            try:
                resp = int(resp)
            except ValueError:
                pass
            else:
                low, high = numrange
                if low <= resp <= high:
                    return resp
                else:
                    # Out-of-range number: treat as unrecognized.
                    resp = None
        # Try a normal letter input.
        if resp:
            resp = resp[0]
            if resp in letters:
                return resp
        # Prompt for new input.
        resp = input_(fallback_prompt)
def input_yn(prompt, require=False):
    """Prompt the user for a yes/no answer and return True for "yes".

    Unless `require` is true, an empty response defaults to "yes".
    """
    answer = input_options(('y', 'n'), require, prompt, 'Enter Y or N:')
    return answer == 'y'
def human_bytes(size):
    """Format `size`, a number of bytes, as a human-readable string
    such as "1.5 MB".
    """
    units = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'HB')
    value = size
    for unit in units:
        if value < 1024:
            return "%3.1f %s" % (value, unit)
        value /= 1024.0
    # Larger than every known suffix.
    return "big"
def human_seconds(interval):
    """Format `interval`, a number of seconds, as a rough English
    time interval such as "4.2 weeks".
    """
    units = [
        (1, 'second'),
        (60, 'minute'),
        (60, 'hour'),
        (24, 'day'),
        (7, 'week'),
        (52, 'year'),
        (10, 'decade'),
    ]
    # Walk adjacent unit pairs, dividing as we go, until the value is
    # smaller than the next unit's increment.
    for (increment, suffix), (next_increment, _) in zip(units, units[1:]):
        interval /= float(increment)
        if interval < next_increment:
            break
    else:
        # Ran past every unit: express in the largest one.
        increment, suffix = units[-1]
        interval /= float(increment)
    return "%3.1f %ss" % (interval, suffix)
def human_seconds_short(interval):
    """Format a duration in seconds as a short M:SS string."""
    total = int(interval)
    minutes, seconds = divmod(total, 60)
    return u'%i:%02i' % (minutes, seconds)
# ANSI terminal colorization code heavily inspired by pygments:
# http://dev.pocoo.org/hg/pygments-main/file/b2deea5b5030/pygments/console.py
# (pygments is by Tim Hatch, Armin Ronacher, et al.)
COLOR_ESCAPE = "\x1b["
DARK_COLORS = ["black", "darkred", "darkgreen", "brown", "darkblue",
"purple", "teal", "lightgray"]
LIGHT_COLORS = ["darkgray", "red", "green", "yellow", "blue",
"fuchsia", "turquoise", "white"]
RESET_COLOR = COLOR_ESCAPE + "39;49;00m"
def _colorize(color, text):
"""Returns a string that prints the given text in the given color
in a terminal that is ANSI color-aware. The color must be something
in DARK_COLORS or LIGHT_COLORS.
"""
if color in DARK_COLORS:
escape = COLOR_ESCAPE + "%im" % (DARK_COLORS.index(color) + 30)
elif color in LIGHT_COLORS:
escape = COLOR_ESCAPE + "%i;01m" % (LIGHT_COLORS.index(color) + 30)
else:
raise ValueError('no such color %s', color)
return escape + text + RESET_COLOR
def colorize(color, text):
    """Colorize `text` when colored output is enabled in the
    configuration; otherwise return it unchanged. (Conditional wrapper
    around _colorize.)
    """
    if not config['color']:
        return text
    return _colorize(color, text)
def _colordiff(a, b, highlight='red', minor_highlight='lightgray'):
    """Given two values, return the same pair of strings except with
    their differences highlighted in the specified color. Strings are
    highlighted intelligently to show differences; other values are
    stringified and highlighted in their entirety.
    """
    if not isinstance(a, basestring) or not isinstance(b, basestring):
        # Non-strings: use ordinary equality.
        a = unicode(a)
        b = unicode(b)
        if a == b:
            return a, b
        else:
            return colorize(highlight, a), colorize(highlight, b)
    if isinstance(a, bytes) or isinstance(b, bytes):
        # A path field.
        a = util.displayable_path(a)
        b = util.displayable_path(b)
    a_out = []
    b_out = []
    # SequenceMatcher with no "junk" characters gives character-level
    # edit opcodes between the two strings.
    matcher = SequenceMatcher(lambda x: False, a, b)
    for op, a_start, a_end, b_start, b_end in matcher.get_opcodes():
        if op == 'equal':
            # In both strings.
            a_out.append(a[a_start:a_end])
            b_out.append(b[b_start:b_end])
        elif op == 'insert':
            # Right only.
            b_out.append(colorize(highlight, b[b_start:b_end]))
        elif op == 'delete':
            # Left only.
            a_out.append(colorize(highlight, a[a_start:a_end]))
        elif op == 'replace':
            # Right and left differ. Colorise with second highlight if
            # it's just a case change.
            if a[a_start:a_end].lower() != b[b_start:b_end].lower():
                color = highlight
            else:
                color = minor_highlight
            a_out.append(colorize(color, a[a_start:a_end]))
            b_out.append(colorize(color, b[b_start:b_end]))
        else:
            # get_opcodes() only emits the four tags handled above.
            assert(False)
    return u''.join(a_out), u''.join(b_out)
def colordiff(a, b, highlight='red'):
    """Highlight the differences between two values when colored output
    is enabled; otherwise just stringify both. (Conditional wrapper
    around _colordiff.)
    """
    if not config['color']:
        return unicode(a), unicode(b)
    return _colordiff(a, b, highlight)
def color_diff_suffix(a, b, highlight='red'):
    """Stringify two values and colorize everything after their longest
    common prefix. The pair is returned unchanged (but stringified)
    when color output is disabled or the values are equal.
    """
    left, right = unicode(a), unicode(b)
    if not config['color'] or left == right:
        return left, right
    # Length of the longest common prefix of the two strings.
    limit = min(len(left), len(right))
    prefix_len = limit
    for i in range(limit):
        if left[i] != right[i]:
            prefix_len = i
            break
    # Colorize from the first difference on.
    return (left[:prefix_len] + colorize(highlight, left[prefix_len:]),
            right[:prefix_len] + colorize(highlight, right[prefix_len:]))
def get_path_formats(subview=None):
    """Return the configured path formats as a list of
    (query, Template) pairs. Reads config['paths'] unless another
    configuration view is supplied.
    """
    subview = subview or config['paths']
    formats = []
    for key, view in subview.items():
        # Expand shorthand keys like "comp" into full queries.
        query = PF_KEY_QUERIES.get(key, key)
        formats.append((query, Template(view.get(unicode))))
    return formats
def get_replacements():
    """Confit validation function that reads the "replace" config
    option as a list of (compiled regex, replacement string) pairs.

    Raises UserError when a pattern is not a valid regular expression.
    """
    replacements = []
    for pattern, repl in config['replace'].get(dict).items():
        try:
            compiled = re.compile(pattern)
        except re.error:
            raise UserError(
                u'malformed regular expression in replace: {0}'.format(
                    pattern
                ))
        replacements.append((compiled, repl))
    return replacements
def get_plugin_paths():
    """Return the normalized plugin search paths from the config file.

    The "pluginpath" value may be a single string or a list of strings;
    any other type raises a ConfigTypeError.
    """
    raw = config['pluginpath'].get()
    if isinstance(raw, basestring):
        # A lone string means a single search path.
        paths = [raw]
    elif isinstance(raw, list):
        paths = raw
    else:
        raise confit.ConfigTypeError(
            u'pluginpath must be string or a list of strings'
        )
    return map(util.normpath, paths)
def _pick_format(album, fmt=None):
    """Choose a format string for printing an Album or Item: the
    explicit `fmt` when given, otherwise the configured default for
    the object type.
    """
    if fmt:
        return fmt
    option = 'list_format_album' if album else 'list_format_item'
    return config[option].get(unicode)
def print_obj(obj, lib, fmt=None):
    """Print an Album or Item using `fmt` (a string or a Template) or,
    when it is absent, the configured default template.
    """
    is_album = isinstance(obj, library.Album)
    fmt = _pick_format(is_album, fmt)
    template = fmt if isinstance(fmt, Template) else Template(fmt)
    if is_album:
        # Albums evaluate templates without a library reference.
        print_(obj.evaluate_template(template))
    else:
        print_(obj.evaluate_template(template, lib=lib))
def term_width():
    """Return the terminal width in columns, falling back to the
    configured 'ui.terminal_width' value when it cannot be determined.
    """
    fallback = config['ui']['terminal_width'].get(int)
    # The fcntl and termios modules only exist on Unix platforms, so
    # everywhere else we use the fallback.
    try:
        import fcntl
        import termios
    except ImportError:
        return fallback
    try:
        buf = fcntl.ioctl(0, termios.TIOCGWINSZ, ' '*4)
        height, width = struct.unpack('hh', buf)
    except (IOError, struct.error):
        return fallback
    return width
# Subcommand parsing infrastructure.
# This is a fairly generic subcommand parser for optparse. It is
# maintained externally here:
# http://gist.github.com/462717
# There you will also find a better description of the code and a more
# succinct example program.
class Subcommand(object):
    """One subcommand of a root command-line application, suitable for
    registration with a SubcommandsOptionParser.
    """
    def __init__(self, name, parser=None, help='', aliases=()):
        """Create a new subcommand.

        `name` is the primary way to invoke the subcommand and
        `aliases` are alternate names. `parser` is an OptionParser for
        the subcommand's own options (a fresh empty parser is created
        when none is given) and `help` is a short description shown in
        the command list.
        """
        self.name = name
        self.aliases = aliases
        self.help = help
        self.parser = parser or optparse.OptionParser()
class SubcommandsOptionParser(optparse.OptionParser):
    """A variant of OptionParser that parses subcommands and their
    arguments.
    """
    # A singleton command used to give help on other subcommands.
    _HelpSubcommand = Subcommand('help', optparse.OptionParser(),
        help='give detailed help on a specific sub-command',
        aliases=('?',))
    def __init__(self, *args, **kwargs):
        """Create a new subcommand-aware option parser. All of the
        options to OptionParser.__init__ are supported in addition
        to subcommands, a sequence of Subcommand objects.
        """
        # The subcommand array, with the help command included.
        self.subcommands = list(kwargs.pop('subcommands', []))
        self.subcommands.append(self._HelpSubcommand)
        # A more helpful default usage.
        if 'usage' not in kwargs:
            kwargs['usage'] = """
  %prog COMMAND [ARGS...]
  %prog help COMMAND"""
        # Super constructor.
        optparse.OptionParser.__init__(self, *args, **kwargs)
        # Adjust the help-visible name of each subcommand.
        for subcommand in self.subcommands:
            subcommand.parser.prog = '%s %s' % \
                (self.get_prog_name(), subcommand.name)
        # Our root parser needs to stop on the first unrecognized argument.
        self.disable_interspersed_args()
    def add_subcommand(self, cmd):
        """Adds a Subcommand object to the parser's list of commands.
        """
        self.subcommands.append(cmd)
    # Add the list of subcommands to the help message.
    def format_help(self, formatter=None):
        """Extend OptionParser.format_help with a "Commands" section
        listing every registered subcommand, its aliases and its help
        text, wrapped to the formatter's width.
        """
        # Get the original help message, to which we will append.
        out = optparse.OptionParser.format_help(self, formatter)
        if formatter is None:
            formatter = self.formatter
        # Subcommands header.
        result = ["\n"]
        result.append(formatter.format_heading('Commands'))
        formatter.indent()
        # Generate the display names (including aliases).
        # Also determine the help position.
        disp_names = []
        help_position = 0
        for subcommand in self.subcommands:
            name = subcommand.name
            if subcommand.aliases:
                name += ' (%s)' % ', '.join(subcommand.aliases)
            disp_names.append(name)
            # Set the help position based on the max width.
            proposed_help_position = len(name) + formatter.current_indent + 2
            if proposed_help_position <= formatter.max_help_position:
                help_position = max(help_position, proposed_help_position)
        # Add each subcommand to the output.
        for subcommand, name in zip(self.subcommands, disp_names):
            # Lifted directly from optparse.py.
            name_width = help_position - formatter.current_indent - 2
            if len(name) > name_width:
                # Name too long: put the help text on the next line.
                name = "%*s%s\n" % (formatter.current_indent, "", name)
                indent_first = help_position
            else:
                name = "%*s%-*s " % (formatter.current_indent, "",
                                     name_width, name)
                indent_first = 0
            result.append(name)
            help_width = formatter.width - help_position
            help_lines = textwrap.wrap(subcommand.help, help_width)
            result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
            result.extend(["%*s%s\n" % (help_position, "", line)
                           for line in help_lines[1:]])
        formatter.dedent()
        # Concatenate the original help message with the subcommand
        # list.
        return out + "".join(result)
    def _subcommand_for_name(self, name):
        """Return the subcommand in self.subcommands matching the
        given name. The name may either be the name of a subcommand or
        an alias. If no subcommand matches, returns None.
        """
        for subcommand in self.subcommands:
            if name == subcommand.name or \
               name in subcommand.aliases:
                return subcommand
        return None
    def parse_args(self, a=None, v=None):
        """Like OptionParser.parse_args, but returns these four items:
        - options: the options passed to the root parser
        - subcommand: the Subcommand object that was invoked
        - suboptions: the options passed to the subcommand parser
        - subargs: the positional arguments passed to the subcommand

        With no arguments at all, or for the `help` subcommand, prints
        help and exits instead of returning.
        """
        options, args = optparse.OptionParser.parse_args(self, a, v)
        if not args:
            # No command given.
            self.print_help()
            self.exit()
        else:
            cmdname = args.pop(0)
            subcommand = self._subcommand_for_name(cmdname)
            if not subcommand:
                self.error('unknown command ' + cmdname)
        suboptions, subargs = subcommand.parser.parse_args(args)
        if subcommand is self._HelpSubcommand:
            if subargs:
                # particular
                cmdname = subargs[0]
                helpcommand = self._subcommand_for_name(cmdname)
                if not helpcommand:
                    self.error('no command named {0}'.format(cmdname))
                helpcommand.parser.print_help()
                self.exit()
            else:
                # general
                self.print_help()
                self.exit()
        return options, subcommand, suboptions, subargs
# The root parser and its main function.
def _raw_main(args):
    """A helper function for `main` without top-level exception
    handling: set up plugins, parse the command line, open the
    library, and dispatch to the chosen subcommand.
    """
    # Temporary: Migrate from 1.0-style configuration.
    from beets.ui import migrate
    migrate.automigrate()
    # Get the default subcommands.
    from beets.ui.commands import default_commands
    # Add plugin paths.
    sys.path += get_plugin_paths()
    # Load requested plugins.
    plugins.load_plugins(config['plugins'].as_str_seq())
    plugins.send("pluginload")
    # Construct the root parser.
    commands = list(default_commands)
    commands += plugins.commands()
    commands.append(migrate.migrate_cmd)  # Temporary.
    parser = SubcommandsOptionParser(subcommands=commands)
    parser.add_option('-l', '--library', dest='library',
                      help='library database file to use')
    parser.add_option('-d', '--directory', dest='directory',
                      help="destination music directory")
    parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
                      help='print debugging information')
    # Parse the command-line!
    options, subcommand, suboptions, subargs = parser.parse_args(args)
    # Command-line options override configuration values.
    config.set_args(options)
    # Open library file.
    dbpath = config['library'].as_filename()
    try:
        lib = library.Library(
            dbpath,
            config['directory'].as_filename(),
            get_path_formats(),
            get_replacements(),
        )
    except sqlite3.OperationalError:
        raise UserError(u"database file {0} could not be opened".format(
            util.displayable_path(dbpath)
        ))
    plugins.send("library_opened", lib=lib)
    # Configure the logger.
    if config['verbose'].get(bool):
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)
    log.debug(u'data directory: {0}\n'
              u'library database: {1}\n'
              u'library directory: {2}'.format(
                  util.displayable_path(config.config_dir()),
                  util.displayable_path(lib.path),
                  util.displayable_path(lib.directory),
              ))
    # Configure the MusicBrainz API.
    mb.configure()
    # Invoke the subcommand.
    subcommand.func(lib, suboptions, subargs)
    plugins.send('cli_exit', lib=lib)
def main(args=None):
    """Run the main command-line interface for beets. Includes top-level
    exception handlers that print friendly error messages.
    """
    try:
        _raw_main(args)
    except UserError as exc:
        message = exc.args[0] if exc.args else None
        log.error(u'error: {0}'.format(message))
        sys.exit(1)
    except util.HumanReadableException as exc:
        # The exception knows how to log itself.
        exc.log(log)
        sys.exit(1)
    except confit.ConfigError as exc:
        # NOTE(review): unlike the other error paths this does not
        # sys.exit(1) — confirm the zero exit status is intentional.
        log.error(u'configuration error: {0}'.format(exc))
    except IOError as exc:
        if exc.errno == errno.EPIPE:
            # "Broken pipe". End silently.
            pass
        else:
            raise
    except KeyboardInterrupt:
        # Silently ignore ^C except in verbose mode.
        log.debug(traceback.format_exc())
| 33.862893 | 77 | 0.611159 |
from __future__ import print_function
import locale
import optparse
import textwrap
import sys
from difflib import SequenceMatcher
import logging
import sqlite3
import errno
import re
import struct
import traceback
from beets import library
from beets import plugins
from beets import util
from beets.util.functemplate import Template
from beets import config
from beets.util import confit
from beets.autotag import mb
if sys.platform == 'win32':
try:
import colorama
except ImportError:
pass
else:
colorama.init()
PF_KEY_QUERIES = {
'comp': 'comp:true',
'singleton': 'singleton:true',
}
class UserError(Exception):
pass
log = logging.getLogger('beets')
def _encoding():
encoding = config['terminal_encoding'].get()
if encoding:
return encoding
try:
return locale.getdefaultlocale()[1] or 'utf8'
except ValueError:
return 'utf8'
def decargs(arglist):
return [s.decode(_encoding()) for s in arglist]
def print_(*strings):
if strings:
if isinstance(strings[0], unicode):
txt = u' '.join(strings)
else:
txt = ' '.join(strings)
else:
txt = u''
if isinstance(txt, unicode):
txt = txt.encode(_encoding(), 'replace')
print(txt)
def input_(prompt=None):
if prompt:
if isinstance(prompt, unicode):
prompt = prompt.encode(_encoding(), 'replace')
print(prompt, end=' ')
try:
resp = raw_input()
except EOFError:
raise UserError('stdin stream ended while input required')
return resp.decode(sys.stdin.encoding or 'utf8', 'ignore')
def input_options(options, require=False, prompt=None, fallback_prompt=None,
numrange=None, default=None, max_width=72):
letters = {}
display_letters = []
capitalized = []
first = True
for option in options:
for letter in option:
if letter.isalpha() and letter.upper() == letter:
found_letter = letter
break
else:
for letter in option:
if not letter.isalpha():
continue
if letter not in letters:
found_letter = letter
break
else:
raise ValueError('no unambiguous lettering found')
letters[found_letter.lower()] = option
index = option.index(found_letter)
# Mark the option's shortcut letter for display.
if not require and ((default is None and not numrange and first) or
(isinstance(default, basestring) and
found_letter.lower() == default.lower())):
show_letter = '[%s]' % found_letter.upper()
is_default = True
else:
show_letter = found_letter.upper()
is_default = False
show_letter = colorize('turquoise' if is_default else 'blue',
show_letter)
capitalized.append(
option[:index] + show_letter + option[index+1:]
)
display_letters.append(found_letter.upper())
first = False
if require:
default = None
elif default is None:
if numrange:
default = numrange[0]
else:
default = display_letters[0].lower()
if not prompt:
prompt_parts = []
prompt_part_lengths = []
if numrange:
if isinstance(default, int):
default_name = str(default)
default_name = colorize('turquoise', default_name)
tmpl = '# selection (default %s)'
prompt_parts.append(tmpl % default_name)
prompt_part_lengths.append(len(tmpl % str(default)))
else:
prompt_parts.append('# selection')
prompt_part_lengths.append(len(prompt_parts[-1]))
prompt_parts += capitalized
prompt_part_lengths += [len(s) for s in options]
prompt = ''
line_length = 0
for i, (part, length) in enumerate(zip(prompt_parts,
prompt_part_lengths)):
if i == len(prompt_parts) - 1:
part += '?'
else:
part += ','
length += 1
if line_length + length + 1 > max_width:
prompt += '\n'
line_length = 0
if line_length != 0:
part = ' ' + part
length += 1
prompt += part
line_length += length
if not fallback_prompt:
fallback_prompt = 'Enter one of '
if numrange:
fallback_prompt += '%i-%i, ' % numrange
fallback_prompt += ', '.join(display_letters) + ':'
resp = input_(prompt)
while True:
resp = resp.strip().lower()
if default is not None and not resp:
resp = default
if numrange:
try:
resp = int(resp)
except ValueError:
pass
else:
low, high = numrange
if low <= resp <= high:
return resp
else:
resp = None
if resp:
resp = resp[0]
if resp in letters:
return resp
resp = input_(fallback_prompt)
def input_yn(prompt, require=False):
sel = input_options(
('y', 'n'), require, prompt, 'Enter Y or N:'
)
return sel == 'y'
def human_bytes(size):
suffices = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'HB']
for suffix in suffices:
if size < 1024:
return "%3.1f %s" % (size, suffix)
size /= 1024.0
return "big"
def human_seconds(interval):
units = [
(1, 'second'),
(60, 'minute'),
(60, 'hour'),
(24, 'day'),
(7, 'week'),
(52, 'year'),
(10, 'decade'),
]
for i in range(len(units)-1):
increment, suffix = units[i]
next_increment, _ = units[i+1]
interval /= float(increment)
if interval < next_increment:
break
else:
increment, suffix = units[-1]
interval /= float(increment)
return "%3.1f %ss" % (interval, suffix)
def human_seconds_short(interval):
interval = int(interval)
return u'%i:%02i' % (interval // 60, interval % 60)
COLOR_ESCAPE = "\x1b["
DARK_COLORS = ["black", "darkred", "darkgreen", "brown", "darkblue",
"purple", "teal", "lightgray"]
LIGHT_COLORS = ["darkgray", "red", "green", "yellow", "blue",
"fuchsia", "turquoise", "white"]
RESET_COLOR = COLOR_ESCAPE + "39;49;00m"
def _colorize(color, text):
if color in DARK_COLORS:
escape = COLOR_ESCAPE + "%im" % (DARK_COLORS.index(color) + 30)
elif color in LIGHT_COLORS:
escape = COLOR_ESCAPE + "%i;01m" % (LIGHT_COLORS.index(color) + 30)
else:
raise ValueError('no such color %s', color)
return escape + text + RESET_COLOR
def colorize(color, text):
if config['color']:
return _colorize(color, text)
else:
return text
def _colordiff(a, b, highlight='red', minor_highlight='lightgray'):
if not isinstance(a, basestring) or not isinstance(b, basestring):
a = unicode(a)
b = unicode(b)
if a == b:
return a, b
else:
return colorize(highlight, a), colorize(highlight, b)
if isinstance(a, bytes) or isinstance(b, bytes):
a = util.displayable_path(a)
b = util.displayable_path(b)
a_out = []
b_out = []
matcher = SequenceMatcher(lambda x: False, a, b)
for op, a_start, a_end, b_start, b_end in matcher.get_opcodes():
if op == 'equal':
a_out.append(a[a_start:a_end])
b_out.append(b[b_start:b_end])
elif op == 'insert':
b_out.append(colorize(highlight, b[b_start:b_end]))
elif op == 'delete':
a_out.append(colorize(highlight, a[a_start:a_end]))
elif op == 'replace':
if a[a_start:a_end].lower() != b[b_start:b_end].lower():
color = highlight
else:
color = minor_highlight
a_out.append(colorize(color, a[a_start:a_end]))
b_out.append(colorize(color, b[b_start:b_end]))
else:
assert(False)
return u''.join(a_out), u''.join(b_out)
def colordiff(a, b, highlight='red'):
if config['color']:
return _colordiff(a, b, highlight)
else:
return unicode(a), unicode(b)
def color_diff_suffix(a, b, highlight='red'):
a, b = unicode(a), unicode(b)
if not config['color']:
return a, b
# Fast path.
if a == b:
return a, b
# Find the longest common prefix.
first_diff = None
for i in range(min(len(a), len(b))):
if a[i] != b[i]:
first_diff = i
break
else:
first_diff = min(len(a), len(b))
# Colorize from the first difference on.
return a[:first_diff] + colorize(highlight, a[first_diff:]), \
b[:first_diff] + colorize(highlight, b[first_diff:])
def get_path_formats(subview=None):
path_formats = []
subview = subview or config['paths']
for query, view in subview.items():
query = PF_KEY_QUERIES.get(query, query) # Expand common queries.
path_formats.append((query, Template(view.get(unicode))))
return path_formats
def get_replacements():
replacements = []
for pattern, repl in config['replace'].get(dict).items():
try:
replacements.append((re.compile(pattern), repl))
except re.error:
raise UserError(
u'malformed regular expression in replace: {0}'.format(
pattern
))
return replacements
def get_plugin_paths():
pluginpaths = config['pluginpath'].get()
if isinstance(pluginpaths, basestring):
pluginpaths = [pluginpaths]
if not isinstance(pluginpaths, list):
raise confit.ConfigTypeError(
u'pluginpath must be string or a list of strings'
)
return map(util.normpath, pluginpaths)
def _pick_format(album, fmt=None):
if fmt:
return fmt
if album:
return config['list_format_album'].get(unicode)
else:
return config['list_format_item'].get(unicode)
def print_obj(obj, lib, fmt=None):
album = isinstance(obj, library.Album)
fmt = _pick_format(album, fmt)
if isinstance(fmt, Template):
template = fmt
else:
template = Template(fmt)
if album:
print_(obj.evaluate_template(template))
else:
print_(obj.evaluate_template(template, lib=lib))
def term_width():
fallback = config['ui']['terminal_width'].get(int)
# The fcntl and termios modules are not available on non-Unix
# platforms, so we fall back to a constant.
try:
import fcntl
import termios
except ImportError:
return fallback
try:
buf = fcntl.ioctl(0, termios.TIOCGWINSZ, ' '*4)
except IOError:
return fallback
try:
height, width = struct.unpack('hh', buf)
except struct.error:
return fallback
return width
# Subcommand parsing infrastructure.
# This is a fairly generic subcommand parser for optparse. It is
# maintained externally here:
# http://gist.github.com/462717
# There you will also find a better description of the code and a more
# succinct example program.
class Subcommand(object):
def __init__(self, name, parser=None, help='', aliases=()):
self.name = name
self.parser = parser or optparse.OptionParser()
self.aliases = aliases
self.help = help
class SubcommandsOptionParser(optparse.OptionParser):
# A singleton command used to give help on other subcommands.
_HelpSubcommand = Subcommand('help', optparse.OptionParser(),
help='give detailed help on a specific sub-command',
aliases=('?',))
def __init__(self, *args, **kwargs):
# The subcommand array, with the help command included.
self.subcommands = list(kwargs.pop('subcommands', []))
self.subcommands.append(self._HelpSubcommand)
# A more helpful default usage.
if 'usage' not in kwargs:
kwargs['usage'] = """
%prog COMMAND [ARGS...]
%prog help COMMAND"""
# Super constructor.
optparse.OptionParser.__init__(self, *args, **kwargs)
# Adjust the help-visible name of each subcommand.
for subcommand in self.subcommands:
subcommand.parser.prog = '%s %s' % \
(self.get_prog_name(), subcommand.name)
# Our root parser needs to stop on the first unrecognized argument.
self.disable_interspersed_args()
def add_subcommand(self, cmd):
self.subcommands.append(cmd)
# Add the list of subcommands to the help message.
def format_help(self, formatter=None):
# Get the original help message, to which we will append.
out = optparse.OptionParser.format_help(self, formatter)
if formatter is None:
formatter = self.formatter
# Subcommands header.
result = ["\n"]
result.append(formatter.format_heading('Commands'))
formatter.indent()
# Generate the display names (including aliases).
# Also determine the help position.
disp_names = []
help_position = 0
for subcommand in self.subcommands:
name = subcommand.name
if subcommand.aliases:
name += ' (%s)' % ', '.join(subcommand.aliases)
disp_names.append(name)
# Set the help position based on the max width.
proposed_help_position = len(name) + formatter.current_indent + 2
if proposed_help_position <= formatter.max_help_position:
help_position = max(help_position, proposed_help_position)
# Add each subcommand to the output.
for subcommand, name in zip(self.subcommands, disp_names):
# Lifted directly from optparse.py.
name_width = help_position - formatter.current_indent - 2
if len(name) > name_width:
name = "%*s%s\n" % (formatter.current_indent, "", name)
indent_first = help_position
else:
name = "%*s%-*s " % (formatter.current_indent, "",
name_width, name)
indent_first = 0
result.append(name)
help_width = formatter.width - help_position
help_lines = textwrap.wrap(subcommand.help, help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (help_position, "", line)
for line in help_lines[1:]])
formatter.dedent()
# Concatenate the original help message with the subcommand
# list.
return out + "".join(result)
def _subcommand_for_name(self, name):
for subcommand in self.subcommands:
if name == subcommand.name or \
name in subcommand.aliases:
return subcommand
return None
def parse_args(self, a=None, v=None):
options, args = optparse.OptionParser.parse_args(self, a, v)
if not args:
# No command given.
self.print_help()
self.exit()
else:
cmdname = args.pop(0)
subcommand = self._subcommand_for_name(cmdname)
if not subcommand:
self.error('unknown command ' + cmdname)
suboptions, subargs = subcommand.parser.parse_args(args)
if subcommand is self._HelpSubcommand:
if subargs:
# particular
cmdname = subargs[0]
helpcommand = self._subcommand_for_name(cmdname)
if not helpcommand:
self.error('no command named {0}'.format(cmdname))
helpcommand.parser.print_help()
self.exit()
else:
# general
self.print_help()
self.exit()
return options, subcommand, suboptions, subargs
# The root parser and its main function.
def _raw_main(args):
# Temporary: Migrate from 1.0-style configuration.
from beets.ui import migrate
migrate.automigrate()
# Get the default subcommands.
from beets.ui.commands import default_commands
# Add plugin paths.
sys.path += get_plugin_paths()
# Load requested plugins.
plugins.load_plugins(config['plugins'].as_str_seq())
plugins.send("pluginload")
# Construct the root parser.
commands = list(default_commands)
commands += plugins.commands()
commands.append(migrate.migrate_cmd) # Temporary.
parser = SubcommandsOptionParser(subcommands=commands)
parser.add_option('-l', '--library', dest='library',
help='library database file to use')
parser.add_option('-d', '--directory', dest='directory',
help="destination music directory")
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
help='print debugging information')
# Parse the command-line!
options, subcommand, suboptions, subargs = parser.parse_args(args)
config.set_args(options)
# Open library file.
dbpath = config['library'].as_filename()
try:
lib = library.Library(
dbpath,
config['directory'].as_filename(),
get_path_formats(),
get_replacements(),
)
except sqlite3.OperationalError:
raise UserError(u"database file {0} could not be opened".format(
util.displayable_path(dbpath)
))
plugins.send("library_opened", lib=lib)
# Configure the logger.
if config['verbose'].get(bool):
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
log.debug(u'data directory: {0}\n'
u'library database: {1}\n'
u'library directory: {2}'.format(
util.displayable_path(config.config_dir()),
util.displayable_path(lib.path),
util.displayable_path(lib.directory),
))
# Configure the MusicBrainz API.
mb.configure()
# Invoke the subcommand.
subcommand.func(lib, suboptions, subargs)
plugins.send('cli_exit', lib=lib)
def main(args=None):
try:
_raw_main(args)
except UserError as exc:
message = exc.args[0] if exc.args else None
log.error(u'error: {0}'.format(message))
sys.exit(1)
except util.HumanReadableException as exc:
exc.log(log)
sys.exit(1)
except confit.ConfigError as exc:
log.error(u'configuration error: {0}'.format(exc))
except IOError as exc:
if exc.errno == errno.EPIPE:
# "Broken pipe". End silently.
pass
else:
raise
except KeyboardInterrupt:
# Silently ignore ^C except in verbose mode.
log.debug(traceback.format_exc())
| true | true |
f7288148cb9947d25dfda5b2bf1b30c0e9b02e38 | 701 | py | Python | kge/__init__.py | l6270789/TransE-Knowledge-Graph-Embedding | 1bd07c1701741c08451f28ac367c2453e21721cc | [
"MIT"
] | 73 | 2018-08-23T14:17:44.000Z | 2022-03-30T08:24:00.000Z | kge/__init__.py | Lapis-Hong/Knowledge-Graph-Embedding | 1bd07c1701741c08451f28ac367c2453e21721cc | [
"MIT"
] | 4 | 2018-10-21T03:13:08.000Z | 2020-01-10T08:42:22.000Z | kge/__init__.py | Lapis-Hong/Knowledge-Graph-Embedding | 1bd07c1701741c08451f28ac367c2453e21721cc | [
"MIT"
] | 20 | 2018-09-19T02:06:39.000Z | 2022-03-30T08:24:49.000Z | #!/usr/bin/env python
# coding: utf-8
# @Author: lapis-hong
# @Date : 2018/8/14
"""This module contains several models for Knowledge Graph Embedding
All model classes must inherit class `BaseModel` (defined in model.py)
"""
# import selected Classes into the package level so they can be convieniently imported from the package.
# use from model import TransE instead of from model.transE import TransE
from kge.transE import TransE
from kge.distmult import DISTMULT
from kge.transH import TransH
from kge.transR import TransR
from kge.transD import TransD
from kge.stransE import STransE
# from model import *
__all__ = ["TransE", "DISTMULT", "TransH", "TransR", "TransE", "TransD", "STransE"]
| 31.863636 | 104 | 0.760342 |
from kge.transE import TransE
from kge.distmult import DISTMULT
from kge.transH import TransH
from kge.transR import TransR
from kge.transD import TransD
from kge.stransE import STransE
__all__ = ["TransE", "DISTMULT", "TransH", "TransR", "TransE", "TransD", "STransE"]
| true | true |
f7288179eccfa4f95c2ae7406d29e6ee8e95e728 | 9,353 | py | Python | pypy3-v5.5.0-linux64/lib_pypy/_sqlite3_build.py | timm/timmnix | cd246fdea8ec4668c1ad1b30a02d6f7437e46201 | [
"MIT"
] | null | null | null | pypy3-v5.5.0-linux64/lib_pypy/_sqlite3_build.py | timm/timmnix | cd246fdea8ec4668c1ad1b30a02d6f7437e46201 | [
"MIT"
] | null | null | null | pypy3-v5.5.0-linux64/lib_pypy/_sqlite3_build.py | timm/timmnix | cd246fdea8ec4668c1ad1b30a02d6f7437e46201 | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
# pysqlite2/dbapi.py: pysqlite DB-API module
#
# Copyright (C) 2007-2008 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
#
# Note: This software has been modified for use in PyPy.
import sys, os
from cffi import FFI as _FFI
_ffi = _FFI()
_ffi.cdef("""
#define SQLITE_OK ...
#define SQLITE_ERROR ...
#define SQLITE_INTERNAL ...
#define SQLITE_PERM ...
#define SQLITE_ABORT ...
#define SQLITE_BUSY ...
#define SQLITE_LOCKED ...
#define SQLITE_NOMEM ...
#define SQLITE_READONLY ...
#define SQLITE_INTERRUPT ...
#define SQLITE_IOERR ...
#define SQLITE_CORRUPT ...
#define SQLITE_NOTFOUND ...
#define SQLITE_FULL ...
#define SQLITE_CANTOPEN ...
#define SQLITE_PROTOCOL ...
#define SQLITE_EMPTY ...
#define SQLITE_SCHEMA ...
#define SQLITE_TOOBIG ...
#define SQLITE_CONSTRAINT ...
#define SQLITE_MISMATCH ...
#define SQLITE_MISUSE ...
#define SQLITE_NOLFS ...
#define SQLITE_AUTH ...
#define SQLITE_FORMAT ...
#define SQLITE_RANGE ...
#define SQLITE_NOTADB ...
#define SQLITE_ROW ...
#define SQLITE_DONE ...
#define SQLITE_INTEGER ...
#define SQLITE_FLOAT ...
#define SQLITE_BLOB ...
#define SQLITE_NULL ...
#define SQLITE_TEXT ...
#define SQLITE3_TEXT ...
static void *const SQLITE_TRANSIENT;
#define SQLITE_UTF8 ...
#define SQLITE_DENY ...
#define SQLITE_IGNORE ...
#define SQLITE_CREATE_INDEX ...
#define SQLITE_CREATE_TABLE ...
#define SQLITE_CREATE_TEMP_INDEX ...
#define SQLITE_CREATE_TEMP_TABLE ...
#define SQLITE_CREATE_TEMP_TRIGGER ...
#define SQLITE_CREATE_TEMP_VIEW ...
#define SQLITE_CREATE_TRIGGER ...
#define SQLITE_CREATE_VIEW ...
#define SQLITE_DELETE ...
#define SQLITE_DROP_INDEX ...
#define SQLITE_DROP_TABLE ...
#define SQLITE_DROP_TEMP_INDEX ...
#define SQLITE_DROP_TEMP_TABLE ...
#define SQLITE_DROP_TEMP_TRIGGER ...
#define SQLITE_DROP_TEMP_VIEW ...
#define SQLITE_DROP_TRIGGER ...
#define SQLITE_DROP_VIEW ...
#define SQLITE_INSERT ...
#define SQLITE_PRAGMA ...
#define SQLITE_READ ...
#define SQLITE_SELECT ...
#define SQLITE_TRANSACTION ...
#define SQLITE_UPDATE ...
#define SQLITE_ATTACH ...
#define SQLITE_DETACH ...
#define SQLITE_ALTER_TABLE ...
#define SQLITE_REINDEX ...
#define SQLITE_ANALYZE ...
#define SQLITE_CREATE_VTABLE ...
#define SQLITE_DROP_VTABLE ...
#define SQLITE_FUNCTION ...
const char *sqlite3_libversion(void);
typedef ... sqlite3;
typedef ... sqlite3_stmt;
typedef ... sqlite3_context;
typedef ... sqlite3_value;
typedef int64_t sqlite3_int64;
typedef uint64_t sqlite3_uint64;
int sqlite3_open(
const char *filename, /* Database filename (UTF-8) */
sqlite3 **ppDb /* OUT: SQLite db handle */
);
int sqlite3_close(sqlite3 *);
int sqlite3_busy_timeout(sqlite3*, int ms);
int sqlite3_prepare_v2(
sqlite3 *db, /* Database handle */
const char *zSql, /* SQL statement, UTF-8 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const char **pzTail /* OUT: Pointer to unused portion of zSql */
);
int sqlite3_finalize(sqlite3_stmt *pStmt);
int sqlite3_data_count(sqlite3_stmt *pStmt);
int sqlite3_column_count(sqlite3_stmt *pStmt);
const char *sqlite3_column_name(sqlite3_stmt*, int N);
int sqlite3_get_autocommit(sqlite3*);
int sqlite3_reset(sqlite3_stmt *pStmt);
int sqlite3_step(sqlite3_stmt*);
int sqlite3_errcode(sqlite3 *db);
const char *sqlite3_errmsg(sqlite3*);
int sqlite3_changes(sqlite3*);
int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
int sqlite3_bind_double(sqlite3_stmt*, int, double);
int sqlite3_bind_int(sqlite3_stmt*, int, int);
int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
int sqlite3_bind_null(sqlite3_stmt*, int);
int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*));
int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
double sqlite3_column_double(sqlite3_stmt*, int iCol);
int sqlite3_column_int(sqlite3_stmt*, int iCol);
sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol);
const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
int sqlite3_column_type(sqlite3_stmt*, int iCol);
const char *sqlite3_column_decltype(sqlite3_stmt*,int);
void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
void sqlite3_trace(sqlite3*, void(*)(void*, const char*), void*);
int sqlite3_create_collation(
sqlite3*,
const char *zName,
int eTextRep,
void*,
int(*xCompare)(void*,int,const void*,int,const void*)
);
int sqlite3_set_authorizer(
sqlite3*,
int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
void *pUserData
);
int sqlite3_create_function(
sqlite3 *db,
const char *zFunctionName,
int nArg,
int eTextRep,
void *pApp,
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*)
);
void *sqlite3_aggregate_context(sqlite3_context*, int nBytes);
sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*);
int sqlite3_bind_parameter_count(sqlite3_stmt*);
const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int);
int sqlite3_total_changes(sqlite3*);
int sqlite3_prepare(
sqlite3 *db, /* Database handle */
const char *zSql, /* SQL statement, UTF-8 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const char **pzTail /* OUT: Pointer to unused portion of zSql */
);
void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
void sqlite3_result_double(sqlite3_context*, double);
void sqlite3_result_error(sqlite3_context*, const char*, int);
void sqlite3_result_error16(sqlite3_context*, const void*, int);
void sqlite3_result_error_toobig(sqlite3_context*);
void sqlite3_result_error_nomem(sqlite3_context*);
void sqlite3_result_error_code(sqlite3_context*, int);
void sqlite3_result_int(sqlite3_context*, int);
void sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
void sqlite3_result_null(sqlite3_context*);
void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
void sqlite3_result_text16le(sqlite3_context*,const void*, int,void(*)(void*));
void sqlite3_result_text16be(sqlite3_context*,const void*, int,void(*)(void*));
void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
void sqlite3_result_zeroblob(sqlite3_context*, int n);
const void *sqlite3_value_blob(sqlite3_value*);
int sqlite3_value_bytes(sqlite3_value*);
int sqlite3_value_bytes16(sqlite3_value*);
double sqlite3_value_double(sqlite3_value*);
int sqlite3_value_int(sqlite3_value*);
sqlite3_int64 sqlite3_value_int64(sqlite3_value*);
const unsigned char *sqlite3_value_text(sqlite3_value*);
const void *sqlite3_value_text16(sqlite3_value*);
const void *sqlite3_value_text16le(sqlite3_value*);
const void *sqlite3_value_text16be(sqlite3_value*);
int sqlite3_value_type(sqlite3_value*);
int sqlite3_value_numeric_type(sqlite3_value*);
""")
def _has_load_extension():
"""Only available since 3.3.6"""
unverified_ffi = _FFI()
unverified_ffi.cdef("""
typedef ... sqlite3;
int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
""")
libname = 'sqlite3'
if sys.platform == 'win32':
import os
_libname = os.path.join(os.path.dirname(sys.executable), libname)
if os.path.exists(_libname + '.dll'):
libname = _libname
unverified_lib = unverified_ffi.dlopen(libname)
return hasattr(unverified_lib, 'sqlite3_enable_load_extension')
if _has_load_extension():
_ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);")
if sys.platform.startswith('freebsd'):
_localbase = os.environ.get('LOCALBASE', '/usr/local')
extra_args = dict(
libraries=['sqlite3'],
include_dirs=[os.path.join(_localbase, 'include')],
library_dirs=[os.path.join(_localbase, 'lib')]
)
else:
extra_args = dict(
libraries=['sqlite3']
)
_ffi.set_source("_sqlite3_cffi", "#include <sqlite3.h>", **extra_args)
if __name__ == "__main__":
_ffi.compile()
| 35.029963 | 79 | 0.742008 |
import sys, os
from cffi import FFI as _FFI
_ffi = _FFI()
_ffi.cdef("""
#define SQLITE_OK ...
#define SQLITE_ERROR ...
#define SQLITE_INTERNAL ...
#define SQLITE_PERM ...
#define SQLITE_ABORT ...
#define SQLITE_BUSY ...
#define SQLITE_LOCKED ...
#define SQLITE_NOMEM ...
#define SQLITE_READONLY ...
#define SQLITE_INTERRUPT ...
#define SQLITE_IOERR ...
#define SQLITE_CORRUPT ...
#define SQLITE_NOTFOUND ...
#define SQLITE_FULL ...
#define SQLITE_CANTOPEN ...
#define SQLITE_PROTOCOL ...
#define SQLITE_EMPTY ...
#define SQLITE_SCHEMA ...
#define SQLITE_TOOBIG ...
#define SQLITE_CONSTRAINT ...
#define SQLITE_MISMATCH ...
#define SQLITE_MISUSE ...
#define SQLITE_NOLFS ...
#define SQLITE_AUTH ...
#define SQLITE_FORMAT ...
#define SQLITE_RANGE ...
#define SQLITE_NOTADB ...
#define SQLITE_ROW ...
#define SQLITE_DONE ...
#define SQLITE_INTEGER ...
#define SQLITE_FLOAT ...
#define SQLITE_BLOB ...
#define SQLITE_NULL ...
#define SQLITE_TEXT ...
#define SQLITE3_TEXT ...
static void *const SQLITE_TRANSIENT;
#define SQLITE_UTF8 ...
#define SQLITE_DENY ...
#define SQLITE_IGNORE ...
#define SQLITE_CREATE_INDEX ...
#define SQLITE_CREATE_TABLE ...
#define SQLITE_CREATE_TEMP_INDEX ...
#define SQLITE_CREATE_TEMP_TABLE ...
#define SQLITE_CREATE_TEMP_TRIGGER ...
#define SQLITE_CREATE_TEMP_VIEW ...
#define SQLITE_CREATE_TRIGGER ...
#define SQLITE_CREATE_VIEW ...
#define SQLITE_DELETE ...
#define SQLITE_DROP_INDEX ...
#define SQLITE_DROP_TABLE ...
#define SQLITE_DROP_TEMP_INDEX ...
#define SQLITE_DROP_TEMP_TABLE ...
#define SQLITE_DROP_TEMP_TRIGGER ...
#define SQLITE_DROP_TEMP_VIEW ...
#define SQLITE_DROP_TRIGGER ...
#define SQLITE_DROP_VIEW ...
#define SQLITE_INSERT ...
#define SQLITE_PRAGMA ...
#define SQLITE_READ ...
#define SQLITE_SELECT ...
#define SQLITE_TRANSACTION ...
#define SQLITE_UPDATE ...
#define SQLITE_ATTACH ...
#define SQLITE_DETACH ...
#define SQLITE_ALTER_TABLE ...
#define SQLITE_REINDEX ...
#define SQLITE_ANALYZE ...
#define SQLITE_CREATE_VTABLE ...
#define SQLITE_DROP_VTABLE ...
#define SQLITE_FUNCTION ...
const char *sqlite3_libversion(void);
typedef ... sqlite3;
typedef ... sqlite3_stmt;
typedef ... sqlite3_context;
typedef ... sqlite3_value;
typedef int64_t sqlite3_int64;
typedef uint64_t sqlite3_uint64;
int sqlite3_open(
const char *filename, /* Database filename (UTF-8) */
sqlite3 **ppDb /* OUT: SQLite db handle */
);
int sqlite3_close(sqlite3 *);
int sqlite3_busy_timeout(sqlite3*, int ms);
int sqlite3_prepare_v2(
sqlite3 *db, /* Database handle */
const char *zSql, /* SQL statement, UTF-8 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const char **pzTail /* OUT: Pointer to unused portion of zSql */
);
int sqlite3_finalize(sqlite3_stmt *pStmt);
int sqlite3_data_count(sqlite3_stmt *pStmt);
int sqlite3_column_count(sqlite3_stmt *pStmt);
const char *sqlite3_column_name(sqlite3_stmt*, int N);
int sqlite3_get_autocommit(sqlite3*);
int sqlite3_reset(sqlite3_stmt *pStmt);
int sqlite3_step(sqlite3_stmt*);
int sqlite3_errcode(sqlite3 *db);
const char *sqlite3_errmsg(sqlite3*);
int sqlite3_changes(sqlite3*);
int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
int sqlite3_bind_double(sqlite3_stmt*, int, double);
int sqlite3_bind_int(sqlite3_stmt*, int, int);
int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
int sqlite3_bind_null(sqlite3_stmt*, int);
int sqlite3_bind_text(sqlite3_stmt*, int, const char*, int n, void(*)(void*));
int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
double sqlite3_column_double(sqlite3_stmt*, int iCol);
int sqlite3_column_int(sqlite3_stmt*, int iCol);
sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol);
const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
int sqlite3_column_type(sqlite3_stmt*, int iCol);
const char *sqlite3_column_decltype(sqlite3_stmt*,int);
void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
void sqlite3_trace(sqlite3*, void(*)(void*, const char*), void*);
int sqlite3_create_collation(
sqlite3*,
const char *zName,
int eTextRep,
void*,
int(*xCompare)(void*,int,const void*,int,const void*)
);
int sqlite3_set_authorizer(
sqlite3*,
int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
void *pUserData
);
int sqlite3_create_function(
sqlite3 *db,
const char *zFunctionName,
int nArg,
int eTextRep,
void *pApp,
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*)
);
void *sqlite3_aggregate_context(sqlite3_context*, int nBytes);
sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*);
int sqlite3_bind_parameter_count(sqlite3_stmt*);
const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int);
int sqlite3_total_changes(sqlite3*);
int sqlite3_prepare(
sqlite3 *db, /* Database handle */
const char *zSql, /* SQL statement, UTF-8 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const char **pzTail /* OUT: Pointer to unused portion of zSql */
);
void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
void sqlite3_result_double(sqlite3_context*, double);
void sqlite3_result_error(sqlite3_context*, const char*, int);
void sqlite3_result_error16(sqlite3_context*, const void*, int);
void sqlite3_result_error_toobig(sqlite3_context*);
void sqlite3_result_error_nomem(sqlite3_context*);
void sqlite3_result_error_code(sqlite3_context*, int);
void sqlite3_result_int(sqlite3_context*, int);
void sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
void sqlite3_result_null(sqlite3_context*);
void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
void sqlite3_result_text16le(sqlite3_context*,const void*, int,void(*)(void*));
void sqlite3_result_text16be(sqlite3_context*,const void*, int,void(*)(void*));
void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
void sqlite3_result_zeroblob(sqlite3_context*, int n);
const void *sqlite3_value_blob(sqlite3_value*);
int sqlite3_value_bytes(sqlite3_value*);
int sqlite3_value_bytes16(sqlite3_value*);
double sqlite3_value_double(sqlite3_value*);
int sqlite3_value_int(sqlite3_value*);
sqlite3_int64 sqlite3_value_int64(sqlite3_value*);
const unsigned char *sqlite3_value_text(sqlite3_value*);
const void *sqlite3_value_text16(sqlite3_value*);
const void *sqlite3_value_text16le(sqlite3_value*);
const void *sqlite3_value_text16be(sqlite3_value*);
int sqlite3_value_type(sqlite3_value*);
int sqlite3_value_numeric_type(sqlite3_value*);
""")
def _has_load_extension():
unverified_ffi = _FFI()
unverified_ffi.cdef("""
typedef ... sqlite3;
int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
""")
libname = 'sqlite3'
if sys.platform == 'win32':
import os
_libname = os.path.join(os.path.dirname(sys.executable), libname)
if os.path.exists(_libname + '.dll'):
libname = _libname
unverified_lib = unverified_ffi.dlopen(libname)
return hasattr(unverified_lib, 'sqlite3_enable_load_extension')
if _has_load_extension():
_ffi.cdef("int sqlite3_enable_load_extension(sqlite3 *db, int onoff);")
if sys.platform.startswith('freebsd'):
_localbase = os.environ.get('LOCALBASE', '/usr/local')
extra_args = dict(
libraries=['sqlite3'],
include_dirs=[os.path.join(_localbase, 'include')],
library_dirs=[os.path.join(_localbase, 'lib')]
)
else:
extra_args = dict(
libraries=['sqlite3']
)
_ffi.set_source("_sqlite3_cffi", "#include <sqlite3.h>", **extra_args)
if __name__ == "__main__":
_ffi.compile()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.