Dataset schema (one row per source file; dtype and value/length ranges as reported by the viewer):

| Column | Dtype | Range / classes |
|---|---|---|
| hexsha | string | lengths 40-40 |
| size | int64 | 2-1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4-245 |
| max_stars_repo_name | string | lengths 6-130 |
| max_stars_repo_head_hexsha | string | lengths 40-40 |
| max_stars_repo_licenses | list | lengths 1-10 |
| max_stars_count | int64 | 1-191k |
| max_stars_repo_stars_event_min_datetime | string | lengths 24-24 |
| max_stars_repo_stars_event_max_datetime | string | lengths 24-24 |
| max_issues_repo_path | string | lengths 4-245 |
| max_issues_repo_name | string | lengths 6-130 |
| max_issues_repo_head_hexsha | string | lengths 40-40 |
| max_issues_repo_licenses | list | lengths 1-10 |
| max_issues_count | int64 | 1-67k |
| max_issues_repo_issues_event_min_datetime | string | lengths 24-24 |
| max_issues_repo_issues_event_max_datetime | string | lengths 24-24 |
| max_forks_repo_path | string | lengths 4-245 |
| max_forks_repo_name | string | lengths 6-130 |
| max_forks_repo_head_hexsha | string | lengths 40-40 |
| max_forks_repo_licenses | list | lengths 1-10 |
| max_forks_count | int64 | 1-105k |
| max_forks_repo_forks_event_min_datetime | string | lengths 24-24 |
| max_forks_repo_forks_event_max_datetime | string | lengths 24-24 |
| content | string | lengths 2-1.02M |
| avg_line_length | float64 | 1-417k |
| max_line_length | int64 | 1-987k |
| alphanum_fraction | float64 | 0-1 |
| content_no_comment | string | lengths 0-1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
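This is the column layout of a Hugging Face `datasets`-style code corpus: one row per Python source file, with GitHub star/issue/fork metadata, the raw file `content`, a comment-stripped `content_no_comment` variant, and a few derived text statistics. As a minimal, hedged sketch of how such a table is typically consumed (the dataset identifier below is a placeholder, not the real name):

```python
from datasets import load_dataset

# Hypothetical identifier; the actual dataset name is not given in this preview.
ds = load_dataset("org/python-github-code", split="train")

# Example: keep MIT-licensed files under 100 kB that have at least one star.
# max_stars_count can be null, hence the `or 0` guard.
subset = ds.filter(
    lambda r: "MIT" in r["max_stars_repo_licenses"]
    and r["size"] < 100_000
    and (r["max_stars_count"] or 0) >= 1
)
print(subset[0]["max_stars_repo_path"], subset[0]["hexsha"])
```

The records below are the preview's sample rows, with each cell listed in the schema order above.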
f724555c65e6b8b852f9d596dad3446f0fbf8099
448
py
Python
chap_7/decay_plot.py
jieyanzhu/codes-effective-computation-in-physics
0c99f2da9d462229e6b174a010d7c7b08af4482b
[ "MIT" ]
null
null
null
chap_7/decay_plot.py
jieyanzhu/codes-effective-computation-in-physics
0c99f2da9d462229e6b174a010d7c7b08af4482b
[ "MIT" ]
1
2021-12-23T10:09:01.000Z
2021-12-23T12:06:25.000Z
chap_7/decay_plot.py
jieyanzhu/codes-effective-computation-in-physics
0c99f2da9d462229e6b174a010d7c7b08af4482b
[ "MIT" ]
null
null
null
```python
import numpy as np

# as in the previous example, load decays.csv into a NumPy array
decaydata = np.loadtxt('decays.csv', delimiter=',', skiprows=1)

# provide handles for the x and y columns
time = decaydata[:,0]
decays = decaydata[:,1]

# import the matplotlib plotting functionality
import pylab as plt

plt.plot(time, decays)
plt.xlabel('Time (s)')
plt.ylabel('Decays')
plt.title('Decays')
plt.grid(True)
plt.savefig("decays_matplotlib.png")
```
22.4
64
0.734375
```python
import numpy as np
decaydata = np.loadtxt('decays.csv', delimiter=',', skiprows=1)
time = decaydata[:,0]
decays = decaydata[:,1]
import pylab as plt
plt.plot(time, decays)
plt.xlabel('Time (s)')
plt.ylabel('Decays')
plt.title('Decays')
plt.grid(True)
plt.savefig("decays_matplotlib.png")
```
true
true
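The derived columns of this first record (avg_line_length 22.4, max_line_length 64, alphanum_fraction 0.734375) can be sanity-checked against its `content` field. The dataset's exact definitions are not documented in the preview, so this is only a plausible reconstruction; `row` is assumed to be this record as loaded in the earlier sketch:

```python
row = ds[0]  # assuming the first preview row is also the first row of the loaded split

content = row["content"]              # the decay_plot.py source shown above
lines = content.splitlines()

avg_line_length = len(content) / len(lines)         # ~22.4 (448 bytes over ~20 lines)
max_line_length = max(len(line) for line in lines)  # 64
alphanum_fraction = sum(c.isalnum() for c in content) / len(content)  # ~0.73
```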
f72455bce16e4d2b4c4851dc371a6ac3b783489a
1,136
py
Python
oautom/execution/lambda_execution.py
FabienArcellier/oautom
ed818a34ca726355b1227d3485052793e159b177
[ "MIT" ]
null
null
null
oautom/execution/lambda_execution.py
FabienArcellier/oautom
ed818a34ca726355b1227d3485052793e159b177
[ "MIT" ]
null
null
null
oautom/execution/lambda_execution.py
FabienArcellier/oautom
ed818a34ca726355b1227d3485052793e159b177
[ "MIT" ]
null
null
null
```python
# pylint: disable=useless-super-delegation
import json
from concurrent.futures import ThreadPoolExecutor, Future

import boto3

from oautom import get_logger
from oautom.execution.execution import Execution


class LambdaExecution(Execution):

    def __init__(self, name: str, flow: 'Flow', lambda_function: str, payload: dict = {}):
        super().__init__(name, flow)
        self._future = None  # type: Future
        self._lambda_arn = lambda_function
        self._payload = payload

    def run(self):
        super().run()
        # self._logger.info(f"lambda: {self._lambda_arn}")
        with ThreadPoolExecutor(max_workers=1) as executor:
            self._future = executor.submit(_run_lambda, self._lambda_arn, self._payload)

    def check(self) -> bool:
        return self._future.done()


def _run_lambda(lambda_function: str, payload: dict):
    logger = get_logger()
    logger.info(f"lambda: {lambda_function}")
    client = boto3.client('lambda')
    client.invoke(
        FunctionName=lambda_function,
        InvocationType='RequestResponse',
        LogType='None',
        Payload=json.dumps(payload),
    )
```
29.128205
90
0.681338
```python
import json
from concurrent.futures import ThreadPoolExecutor, Future

import boto3

from oautom import get_logger
from oautom.execution.execution import Execution


class LambdaExecution(Execution):

    def __init__(self, name: str, flow: 'Flow', lambda_function: str, payload: dict = {}):
        super().__init__(name, flow)
        self._future = None
        self._lambda_arn = lambda_function
        self._payload = payload

    def run(self):
        super().run()
        with ThreadPoolExecutor(max_workers=1) as executor:
            self._future = executor.submit(_run_lambda, self._lambda_arn, self._payload)

    def check(self) -> bool:
        return self._future.done()


def _run_lambda(lambda_function: str, payload: dict):
    logger = get_logger()
    logger.info(f"lambda: {lambda_function}")
    client = boto3.client('lambda')
    client.invoke(
        FunctionName=lambda_function,
        InvocationType='RequestResponse',
        LogType='None',
        Payload=json.dumps(payload),
    )
```
true
true
f724577d79ac37b03a6ecb734534f38b37edce0a
14,449
py
Python
skimage/measure/tests/test_regionprops.py
jjhelmus/scikit-image
b9b5fde0821fe8bcece2528b30d012c65c64ad6f
[ "BSD-3-Clause" ]
2
2017-03-30T11:22:11.000Z
2019-03-03T05:18:01.000Z
skimage/measure/tests/test_regionprops.py
jjhelmus/scikit-image
b9b5fde0821fe8bcece2528b30d012c65c64ad6f
[ "BSD-3-Clause" ]
null
null
null
skimage/measure/tests/test_regionprops.py
jjhelmus/scikit-image
b9b5fde0821fe8bcece2528b30d012c65c64ad6f
[ "BSD-3-Clause" ]
1
2019-12-17T14:53:28.000Z
2019-12-17T14:53:28.000Z
from numpy.testing import assert_array_equal, assert_almost_equal, \ assert_array_almost_equal, assert_raises, assert_equal import numpy as np import math from skimage.measure._regionprops import (regionprops, PROPS, perimeter, _parse_docs) SAMPLE = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1], [0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]] ) INTENSITY_SAMPLE = SAMPLE.copy() INTENSITY_SAMPLE[1, 9:11] = 2 SAMPLE_3D = np.zeros((6, 6, 6), dtype=np.uint8) SAMPLE_3D[1:3, 1:3, 1:3] = 1 SAMPLE_3D[3, 2, 2] = 1 INTENSITY_SAMPLE_3D = SAMPLE_3D.copy() def test_all_props(): region = regionprops(SAMPLE, INTENSITY_SAMPLE)[0] for prop in PROPS: assert_almost_equal(region[prop], getattr(region, PROPS[prop])) def test_all_props_3d(): region = regionprops(SAMPLE_3D, INTENSITY_SAMPLE_3D)[0] for prop in PROPS: try: assert_almost_equal(region[prop], getattr(region, PROPS[prop])) except NotImplementedError: pass def test_dtype(): regionprops(np.zeros((10, 10), dtype=np.int)) regionprops(np.zeros((10, 10), dtype=np.uint)) assert_raises((TypeError), regionprops, np.zeros((10, 10), dtype=np.float)) assert_raises((TypeError), regionprops, np.zeros((10, 10), dtype=np.double)) def test_ndim(): regionprops(np.zeros((10, 10), dtype=np.int)) regionprops(np.zeros((10, 10, 1), dtype=np.int)) regionprops(np.zeros((10, 10, 1, 1), dtype=np.int)) regionprops(np.zeros((10, 10, 10), dtype=np.int)) assert_raises(TypeError, regionprops, np.zeros((10, 10, 10, 2), dtype=np.int)) def test_area(): area = regionprops(SAMPLE)[0].area assert area == np.sum(SAMPLE) area = regionprops(SAMPLE_3D)[0].area assert area == np.sum(SAMPLE_3D) def test_bbox(): bbox = regionprops(SAMPLE)[0].bbox assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1])) SAMPLE_mod = SAMPLE.copy() SAMPLE_mod[:, -1] = 0 bbox = regionprops(SAMPLE_mod)[0].bbox assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1]-1)) bbox = regionprops(SAMPLE_3D)[0].bbox assert_array_almost_equal(bbox, (1, 1, 1, 4, 3, 3)) def test_moments_central(): mu = regionprops(SAMPLE)[0].moments_central # determined with OpenCV assert_almost_equal(mu[0,2], 436.00000000000045) # different from OpenCV results, bug in OpenCV assert_almost_equal(mu[0,3], -737.333333333333) assert_almost_equal(mu[1,1], -87.33333333333303) assert_almost_equal(mu[1,2], -127.5555555555593) assert_almost_equal(mu[2,0], 1259.7777777777774) assert_almost_equal(mu[2,1], 2000.296296296291) assert_almost_equal(mu[3,0], -760.0246913580195) def test_centroid(): centroid = regionprops(SAMPLE)[0].centroid # determined with MATLAB assert_array_almost_equal(centroid, (5.66666666666666, 9.444444444444444)) def test_convex_area(): area = regionprops(SAMPLE)[0].convex_area # determined with MATLAB assert area == 124 def test_convex_image(): img = regionprops(SAMPLE)[0].convex_image # determined with MATLAB ref = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) assert_array_equal(img, ref) def test_coordinates(): sample = np.zeros((10, 10), dtype=np.int8) coords = np.array([[3, 2], [3, 3], [3, 4]]) sample[coords[:, 0], coords[:, 1]] = 1 prop_coords = regionprops(sample)[0].coords assert_array_equal(prop_coords, coords) sample = np.zeros((6, 6, 6), dtype=np.int8) coords = np.array([[1, 1, 1], [1, 2, 1], [1, 3, 1]]) sample[coords[:, 0], coords[:, 1], coords[:, 2]] = 1 prop_coords = regionprops(sample)[0].coords assert_array_equal(prop_coords, coords) def test_eccentricity(): eps = regionprops(SAMPLE)[0].eccentricity assert_almost_equal(eps, 0.814629313427) img = np.zeros((5, 5), dtype=np.int) img[2, 2] = 1 eps = regionprops(img)[0].eccentricity assert_almost_equal(eps, 0) def test_equiv_diameter(): diameter = regionprops(SAMPLE)[0].equivalent_diameter # determined with MATLAB assert_almost_equal(diameter, 9.57461472963) def test_euler_number(): en = regionprops(SAMPLE)[0].euler_number assert en == 1 SAMPLE_mod = SAMPLE.copy() SAMPLE_mod[7, -3] = 0 en = regionprops(SAMPLE_mod)[0].euler_number assert en == 0 def test_extent(): extent = regionprops(SAMPLE)[0].extent assert_almost_equal(extent, 0.4) def test_moments_hu(): hu = regionprops(SAMPLE)[0].moments_hu ref = np.array([ 3.27117627e-01, 2.63869194e-02, 2.35390060e-02, 1.23151193e-03, 1.38882330e-06, -2.72586158e-05, 6.48350653e-06 ]) # bug in OpenCV caused in Central Moments calculation? assert_array_almost_equal(hu, ref) def test_image(): img = regionprops(SAMPLE)[0].image assert_array_equal(img, SAMPLE) img = regionprops(SAMPLE_3D)[0].image assert_array_equal(img, SAMPLE_3D[1:4, 1:3, 1:3]) def test_label(): label = regionprops(SAMPLE)[0].label assert_array_equal(label, 1) label = regionprops(SAMPLE_3D)[0].label assert_array_equal(label, 1) def test_filled_area(): area = regionprops(SAMPLE)[0].filled_area assert area == np.sum(SAMPLE) SAMPLE_mod = SAMPLE.copy() SAMPLE_mod[7, -3] = 0 area = regionprops(SAMPLE_mod)[0].filled_area assert area == np.sum(SAMPLE) def test_filled_image(): img = regionprops(SAMPLE)[0].filled_image assert_array_equal(img, SAMPLE) def test_major_axis_length(): length = regionprops(SAMPLE)[0].major_axis_length # MATLAB has different interpretation of ellipse than found in literature, # here implemented as found in literature assert_almost_equal(length, 16.7924234999) def test_max_intensity(): intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].max_intensity assert_almost_equal(intensity, 2) def test_mean_intensity(): intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].mean_intensity assert_almost_equal(intensity, 1.02777777777777) def test_min_intensity(): intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].min_intensity assert_almost_equal(intensity, 1) def test_minor_axis_length(): length = regionprops(SAMPLE)[0].minor_axis_length # MATLAB has different interpretation of ellipse than found in literature, # here implemented as found in literature assert_almost_equal(length, 9.739302807263) def test_moments(): m = regionprops(SAMPLE)[0].moments # determined with OpenCV assert_almost_equal(m[0,0], 72.0) assert_almost_equal(m[0,1], 408.0) assert_almost_equal(m[0,2], 2748.0) 
assert_almost_equal(m[0,3], 19776.0) assert_almost_equal(m[1,0], 680.0) assert_almost_equal(m[1,1], 3766.0) assert_almost_equal(m[1,2], 24836.0) assert_almost_equal(m[2,0], 7682.0) assert_almost_equal(m[2,1], 43882.0) assert_almost_equal(m[3,0], 95588.0) def test_moments_normalized(): nu = regionprops(SAMPLE)[0].moments_normalized # determined with OpenCV assert_almost_equal(nu[0,2], 0.08410493827160502) assert_almost_equal(nu[1,1], -0.016846707818929982) assert_almost_equal(nu[1,2], -0.002899800614433943) assert_almost_equal(nu[2,0], 0.24301268861454037) assert_almost_equal(nu[2,1], 0.045473992910668816) assert_almost_equal(nu[3,0], -0.017278118992041805) def test_orientation(): orientation = regionprops(SAMPLE)[0].orientation # determined with MATLAB assert_almost_equal(orientation, 0.10446844651921) # test correct quadrant determination orientation2 = regionprops(SAMPLE.T)[0].orientation assert_almost_equal(orientation2, math.pi / 2 - orientation) # test diagonal regions diag = np.eye(10, dtype=int) orientation_diag = regionprops(diag)[0].orientation assert_almost_equal(orientation_diag, -math.pi / 4) orientation_diag = regionprops(np.flipud(diag))[0].orientation assert_almost_equal(orientation_diag, math.pi / 4) orientation_diag = regionprops(np.fliplr(diag))[0].orientation assert_almost_equal(orientation_diag, math.pi / 4) orientation_diag = regionprops(np.fliplr(np.flipud(diag)))[0].orientation assert_almost_equal(orientation_diag, -math.pi / 4) def test_perimeter(): per = regionprops(SAMPLE)[0].perimeter assert_almost_equal(per, 55.2487373415) per = perimeter(SAMPLE.astype('double'), neighbourhood=8) assert_almost_equal(per, 46.8284271247) def test_solidity(): solidity = regionprops(SAMPLE)[0].solidity # determined with MATLAB assert_almost_equal(solidity, 0.580645161290323) def test_weighted_moments_central(): wmu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].weighted_moments_central ref = np.array( [[ 7.4000000000e+01, -2.1316282073e-13, 4.7837837838e+02, -7.5943608473e+02], [ 3.7303493627e-14, -8.7837837838e+01, -1.4801314828e+02, -1.2714707125e+03], [ 1.2602837838e+03, 2.1571526662e+03, 6.6989799420e+03, 1.5304076361e+04], [ -7.6561796932e+02, -4.2385971907e+03, -9.9501164076e+03, -3.3156729271e+04]] ) np.set_printoptions(precision=10) assert_array_almost_equal(wmu, ref) def test_weighted_centroid(): centroid = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].weighted_centroid assert_array_almost_equal(centroid, (5.540540540540, 9.445945945945)) def test_weighted_moments_hu(): whu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].weighted_moments_hu ref = np.array([ 3.1750587329e-01, 2.1417517159e-02, 2.3609322038e-02, 1.2565683360e-03, 8.3014209421e-07, -3.5073773473e-05, 6.7936409056e-06 ]) assert_array_almost_equal(whu, ref) def test_weighted_moments(): wm = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].weighted_moments ref = np.array( [[ 7.4000000000e+01, 4.1000000000e+02, 2.7500000000e+03, 1.9778000000e+04], [ 6.9900000000e+02, 3.7850000000e+03, 2.4855000000e+04, 1.7500100000e+05], [ 7.8630000000e+03, 4.4063000000e+04, 2.9347700000e+05, 2.0810510000e+06], [ 9.7317000000e+04, 5.7256700000e+05, 3.9007170000e+06, 2.8078871000e+07]] ) assert_array_almost_equal(wm, ref) def test_weighted_moments_normalized(): wnu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].weighted_moments_normalized ref = np.array( [[ np.nan, np.nan, 0.0873590903, -0.0161217406], [ np.nan, -0.0160405109, -0.0031421072, -0.0031376984], [ 0.230146783, 
0.0457932622, 0.0165315478, 0.0043903193], [-0.0162529732, -0.0104598869, -0.0028544152, -0.0011057191]] ) assert_array_almost_equal(wnu, ref) def test_label_sequence(): a = np.empty((2, 2), dtype=np.int) a[:, :] = 2 ps = regionprops(a) assert len(ps) == 1 assert ps[0].label == 2 def test_pure_background(): a = np.zeros((2, 2), dtype=np.int) ps = regionprops(a) assert len(ps) == 0 def test_invalid(): ps = regionprops(SAMPLE) def get_intensity_image(): ps[0].intensity_image assert_raises(AttributeError, get_intensity_image) def test_invalid_size(): wrong_intensity_sample = np.array([[1], [1]]) assert_raises(ValueError, regionprops, SAMPLE, wrong_intensity_sample) def test_equals(): arr = np.zeros((100, 100), dtype=np.int) arr[0:25, 0:25] = 1 arr[50:99, 50:99] = 2 regions = regionprops(arr) r1 = regions[0] regions = regionprops(arr) r2 = regions[0] r3 = regions[1] assert_equal(r1 == r2, True, "Same regionprops are not equal") assert_equal(r1 != r3, True, "Different regionprops are equal") def test_iterate_all_props(): region = regionprops(SAMPLE)[0] p0 = dict((p, region[p]) for p in region) region = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[0] p1 = dict((p, region[p]) for p in region) assert len(p0) < len(p1) def test_cache(): region = regionprops(SAMPLE)[0] f0 = region.filled_image region._label_image[:10] = 1 f1 = region.filled_image # Changed underlying image, but cache keeps result the same assert_array_equal(f0, f1) # Now invalidate cache region._cache_active = False f1 = region.filled_image assert np.any(f0 != f1) def test_docstrings_and_props(): region = regionprops(SAMPLE)[0] docs = _parse_docs() props = [m for m in dir(region) if not m.startswith('_')] nr_docs_parsed = len(docs) nr_props = len(props) assert_equal(nr_docs_parsed, nr_props) ds = docs['weighted_moments_normalized'] assert 'iteration' not in ds assert len(ds.split('\n')) > 3 if __name__ == "__main__": from numpy.testing import run_module_suite run_module_suite()
31.479303
82
0.637207
from numpy.testing import assert_array_equal, assert_almost_equal, \ assert_array_almost_equal, assert_raises, assert_equal import numpy as np import math from skimage.measure._regionprops import (regionprops, PROPS, perimeter, _parse_docs) SAMPLE = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1], [0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]] ) INTENSITY_SAMPLE = SAMPLE.copy() INTENSITY_SAMPLE[1, 9:11] = 2 SAMPLE_3D = np.zeros((6, 6, 6), dtype=np.uint8) SAMPLE_3D[1:3, 1:3, 1:3] = 1 SAMPLE_3D[3, 2, 2] = 1 INTENSITY_SAMPLE_3D = SAMPLE_3D.copy() def test_all_props(): region = regionprops(SAMPLE, INTENSITY_SAMPLE)[0] for prop in PROPS: assert_almost_equal(region[prop], getattr(region, PROPS[prop])) def test_all_props_3d(): region = regionprops(SAMPLE_3D, INTENSITY_SAMPLE_3D)[0] for prop in PROPS: try: assert_almost_equal(region[prop], getattr(region, PROPS[prop])) except NotImplementedError: pass def test_dtype(): regionprops(np.zeros((10, 10), dtype=np.int)) regionprops(np.zeros((10, 10), dtype=np.uint)) assert_raises((TypeError), regionprops, np.zeros((10, 10), dtype=np.float)) assert_raises((TypeError), regionprops, np.zeros((10, 10), dtype=np.double)) def test_ndim(): regionprops(np.zeros((10, 10), dtype=np.int)) regionprops(np.zeros((10, 10, 1), dtype=np.int)) regionprops(np.zeros((10, 10, 1, 1), dtype=np.int)) regionprops(np.zeros((10, 10, 10), dtype=np.int)) assert_raises(TypeError, regionprops, np.zeros((10, 10, 10, 2), dtype=np.int)) def test_area(): area = regionprops(SAMPLE)[0].area assert area == np.sum(SAMPLE) area = regionprops(SAMPLE_3D)[0].area assert area == np.sum(SAMPLE_3D) def test_bbox(): bbox = regionprops(SAMPLE)[0].bbox assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1])) SAMPLE_mod = SAMPLE.copy() SAMPLE_mod[:, -1] = 0 bbox = regionprops(SAMPLE_mod)[0].bbox assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1]-1)) bbox = regionprops(SAMPLE_3D)[0].bbox assert_array_almost_equal(bbox, (1, 1, 1, 4, 3, 3)) def test_moments_central(): mu = regionprops(SAMPLE)[0].moments_central assert_almost_equal(mu[0,2], 436.00000000000045) assert_almost_equal(mu[0,3], -737.333333333333) assert_almost_equal(mu[1,1], -87.33333333333303) assert_almost_equal(mu[1,2], -127.5555555555593) assert_almost_equal(mu[2,0], 1259.7777777777774) assert_almost_equal(mu[2,1], 2000.296296296291) assert_almost_equal(mu[3,0], -760.0246913580195) def test_centroid(): centroid = regionprops(SAMPLE)[0].centroid assert_array_almost_equal(centroid, (5.66666666666666, 9.444444444444444)) def test_convex_area(): area = regionprops(SAMPLE)[0].convex_area assert area == 124 def test_convex_image(): img = regionprops(SAMPLE)[0].convex_image ref = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) assert_array_equal(img, ref) def test_coordinates(): sample = np.zeros((10, 10), dtype=np.int8) coords = np.array([[3, 2], [3, 3], [3, 4]]) sample[coords[:, 0], coords[:, 1]] = 1 prop_coords = regionprops(sample)[0].coords assert_array_equal(prop_coords, coords) sample = np.zeros((6, 6, 6), dtype=np.int8) coords = np.array([[1, 1, 1], [1, 2, 1], [1, 3, 1]]) sample[coords[:, 0], coords[:, 1], coords[:, 2]] = 1 prop_coords = regionprops(sample)[0].coords assert_array_equal(prop_coords, coords) def test_eccentricity(): eps = regionprops(SAMPLE)[0].eccentricity assert_almost_equal(eps, 0.814629313427) img = np.zeros((5, 5), dtype=np.int) img[2, 2] = 1 eps = regionprops(img)[0].eccentricity assert_almost_equal(eps, 0) def test_equiv_diameter(): diameter = regionprops(SAMPLE)[0].equivalent_diameter assert_almost_equal(diameter, 9.57461472963) def test_euler_number(): en = regionprops(SAMPLE)[0].euler_number assert en == 1 SAMPLE_mod = SAMPLE.copy() SAMPLE_mod[7, -3] = 0 en = regionprops(SAMPLE_mod)[0].euler_number assert en == 0 def test_extent(): extent = regionprops(SAMPLE)[0].extent assert_almost_equal(extent, 0.4) def test_moments_hu(): hu = regionprops(SAMPLE)[0].moments_hu ref = np.array([ 3.27117627e-01, 2.63869194e-02, 2.35390060e-02, 1.23151193e-03, 1.38882330e-06, -2.72586158e-05, 6.48350653e-06 ]) assert_array_almost_equal(hu, ref) def test_image(): img = regionprops(SAMPLE)[0].image assert_array_equal(img, SAMPLE) img = regionprops(SAMPLE_3D)[0].image assert_array_equal(img, SAMPLE_3D[1:4, 1:3, 1:3]) def test_label(): label = regionprops(SAMPLE)[0].label assert_array_equal(label, 1) label = regionprops(SAMPLE_3D)[0].label assert_array_equal(label, 1) def test_filled_area(): area = regionprops(SAMPLE)[0].filled_area assert area == np.sum(SAMPLE) SAMPLE_mod = SAMPLE.copy() SAMPLE_mod[7, -3] = 0 area = regionprops(SAMPLE_mod)[0].filled_area assert area == np.sum(SAMPLE) def test_filled_image(): img = regionprops(SAMPLE)[0].filled_image assert_array_equal(img, SAMPLE) def test_major_axis_length(): length = regionprops(SAMPLE)[0].major_axis_length assert_almost_equal(length, 16.7924234999) def test_max_intensity(): intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].max_intensity assert_almost_equal(intensity, 2) def test_mean_intensity(): intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].mean_intensity assert_almost_equal(intensity, 1.02777777777777) def test_min_intensity(): intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].min_intensity assert_almost_equal(intensity, 1) def test_minor_axis_length(): length = regionprops(SAMPLE)[0].minor_axis_length assert_almost_equal(length, 9.739302807263) def test_moments(): m = regionprops(SAMPLE)[0].moments assert_almost_equal(m[0,0], 72.0) assert_almost_equal(m[0,1], 408.0) assert_almost_equal(m[0,2], 2748.0) assert_almost_equal(m[0,3], 19776.0) assert_almost_equal(m[1,0], 680.0) assert_almost_equal(m[1,1], 3766.0) assert_almost_equal(m[1,2], 24836.0) assert_almost_equal(m[2,0], 7682.0) assert_almost_equal(m[2,1], 43882.0) assert_almost_equal(m[3,0], 95588.0) def test_moments_normalized(): nu = regionprops(SAMPLE)[0].moments_normalized assert_almost_equal(nu[0,2], 0.08410493827160502) assert_almost_equal(nu[1,1], -0.016846707818929982) assert_almost_equal(nu[1,2], -0.002899800614433943) 
assert_almost_equal(nu[2,0], 0.24301268861454037) assert_almost_equal(nu[2,1], 0.045473992910668816) assert_almost_equal(nu[3,0], -0.017278118992041805) def test_orientation(): orientation = regionprops(SAMPLE)[0].orientation assert_almost_equal(orientation, 0.10446844651921) orientation2 = regionprops(SAMPLE.T)[0].orientation assert_almost_equal(orientation2, math.pi / 2 - orientation) diag = np.eye(10, dtype=int) orientation_diag = regionprops(diag)[0].orientation assert_almost_equal(orientation_diag, -math.pi / 4) orientation_diag = regionprops(np.flipud(diag))[0].orientation assert_almost_equal(orientation_diag, math.pi / 4) orientation_diag = regionprops(np.fliplr(diag))[0].orientation assert_almost_equal(orientation_diag, math.pi / 4) orientation_diag = regionprops(np.fliplr(np.flipud(diag)))[0].orientation assert_almost_equal(orientation_diag, -math.pi / 4) def test_perimeter(): per = regionprops(SAMPLE)[0].perimeter assert_almost_equal(per, 55.2487373415) per = perimeter(SAMPLE.astype('double'), neighbourhood=8) assert_almost_equal(per, 46.8284271247) def test_solidity(): solidity = regionprops(SAMPLE)[0].solidity assert_almost_equal(solidity, 0.580645161290323) def test_weighted_moments_central(): wmu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].weighted_moments_central ref = np.array( [[ 7.4000000000e+01, -2.1316282073e-13, 4.7837837838e+02, -7.5943608473e+02], [ 3.7303493627e-14, -8.7837837838e+01, -1.4801314828e+02, -1.2714707125e+03], [ 1.2602837838e+03, 2.1571526662e+03, 6.6989799420e+03, 1.5304076361e+04], [ -7.6561796932e+02, -4.2385971907e+03, -9.9501164076e+03, -3.3156729271e+04]] ) np.set_printoptions(precision=10) assert_array_almost_equal(wmu, ref) def test_weighted_centroid(): centroid = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].weighted_centroid assert_array_almost_equal(centroid, (5.540540540540, 9.445945945945)) def test_weighted_moments_hu(): whu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].weighted_moments_hu ref = np.array([ 3.1750587329e-01, 2.1417517159e-02, 2.3609322038e-02, 1.2565683360e-03, 8.3014209421e-07, -3.5073773473e-05, 6.7936409056e-06 ]) assert_array_almost_equal(whu, ref) def test_weighted_moments(): wm = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].weighted_moments ref = np.array( [[ 7.4000000000e+01, 4.1000000000e+02, 2.7500000000e+03, 1.9778000000e+04], [ 6.9900000000e+02, 3.7850000000e+03, 2.4855000000e+04, 1.7500100000e+05], [ 7.8630000000e+03, 4.4063000000e+04, 2.9347700000e+05, 2.0810510000e+06], [ 9.7317000000e+04, 5.7256700000e+05, 3.9007170000e+06, 2.8078871000e+07]] ) assert_array_almost_equal(wm, ref) def test_weighted_moments_normalized(): wnu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE )[0].weighted_moments_normalized ref = np.array( [[ np.nan, np.nan, 0.0873590903, -0.0161217406], [ np.nan, -0.0160405109, -0.0031421072, -0.0031376984], [ 0.230146783, 0.0457932622, 0.0165315478, 0.0043903193], [-0.0162529732, -0.0104598869, -0.0028544152, -0.0011057191]] ) assert_array_almost_equal(wnu, ref) def test_label_sequence(): a = np.empty((2, 2), dtype=np.int) a[:, :] = 2 ps = regionprops(a) assert len(ps) == 1 assert ps[0].label == 2 def test_pure_background(): a = np.zeros((2, 2), dtype=np.int) ps = regionprops(a) assert len(ps) == 0 def test_invalid(): ps = regionprops(SAMPLE) def get_intensity_image(): ps[0].intensity_image assert_raises(AttributeError, get_intensity_image) def test_invalid_size(): wrong_intensity_sample = np.array([[1], [1]]) assert_raises(ValueError, 
regionprops, SAMPLE, wrong_intensity_sample) def test_equals(): arr = np.zeros((100, 100), dtype=np.int) arr[0:25, 0:25] = 1 arr[50:99, 50:99] = 2 regions = regionprops(arr) r1 = regions[0] regions = regionprops(arr) r2 = regions[0] r3 = regions[1] assert_equal(r1 == r2, True, "Same regionprops are not equal") assert_equal(r1 != r3, True, "Different regionprops are equal") def test_iterate_all_props(): region = regionprops(SAMPLE)[0] p0 = dict((p, region[p]) for p in region) region = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[0] p1 = dict((p, region[p]) for p in region) assert len(p0) < len(p1) def test_cache(): region = regionprops(SAMPLE)[0] f0 = region.filled_image region._label_image[:10] = 1 f1 = region.filled_image assert_array_equal(f0, f1) region._cache_active = False f1 = region.filled_image assert np.any(f0 != f1) def test_docstrings_and_props(): region = regionprops(SAMPLE)[0] docs = _parse_docs() props = [m for m in dir(region) if not m.startswith('_')] nr_docs_parsed = len(docs) nr_props = len(props) assert_equal(nr_docs_parsed, nr_props) ds = docs['weighted_moments_normalized'] assert 'iteration' not in ds assert len(ds.split('\n')) > 3 if __name__ == "__main__": from numpy.testing import run_module_suite run_module_suite()
true
true
f724579a93aae45369d068c1ff4226dc640ec527
1,153
py
Python
Easy21/plot_cuts.py
vuk119/RL
2f5309bfff719b2965060492a19d008ed8382856
[ "MIT" ]
null
null
null
Easy21/plot_cuts.py
vuk119/RL
2f5309bfff719b2965060492a19d008ed8382856
[ "MIT" ]
null
null
null
Easy21/plot_cuts.py
vuk119/RL
2f5309bfff719b2965060492a19d008ed8382856
[ "MIT" ]
null
null
null
""" Some useful plot functions """ import matplotlib.pyplot as plt import numpy as np def matrix_surf(m, xlimits=None, ylimits=None, **kwargs): if xlimits is None: xlimits = [0, m.shape[0]] if ylimits is None: ylimits = [0, m.shape[1]] Y, X = np.meshgrid(np.arange(ylimits[0], ylimits[1]), np.arange(xlimits[0], xlimits[1])) fig = plt.figure() ax = fig.add_subplot(111,projection='3d',**kwargs) ax.plot_surface(X,Y,m) plt.show() def matrix_scatter(m): X=[] Y=[] Z=[] for i in range(m.shape[0]): for j in range(m.shape[1]): X.append(i) Y.append(j) Z.append(m[i,j]) fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(X, Y, Z) plt.show() # mat = np.zeros((6,5)) # mat[0,0] = 5 # mat[0,1] = 4 # mat[1,0] = 4 # mat[1,1] = 3 # mat[1,2] = 3 # mat[2,1] = 3 # mat[0,2] = 3 # mat[2,0] = 3 # mat[0,3] = 3 # mat[3,0] = 3 # matrix_surf(mat, xlabel = 'X AXIS', ylabel = 'Y AXIS', zlabel='Z', xticks =range(10)) # # # # Y, X = np.meshgrid(np.arange(mat.shape[1]), np.arange(mat.shape[0])) # # print(X) # print(Y)
18.901639
92
0.548135
```python
import matplotlib.pyplot as plt
import numpy as np

def matrix_surf(m, xlimits=None, ylimits=None, **kwargs):
    if xlimits is None:
        xlimits = [0, m.shape[0]]
    if ylimits is None:
        ylimits = [0, m.shape[1]]

    Y, X = np.meshgrid(np.arange(ylimits[0], ylimits[1]), np.arange(xlimits[0], xlimits[1]))

    fig = plt.figure()
    ax = fig.add_subplot(111,projection='3d',**kwargs)
    ax.plot_surface(X,Y,m)
    plt.show()

def matrix_scatter(m):
    X=[]
    Y=[]
    Z=[]
    for i in range(m.shape[0]):
        for j in range(m.shape[1]):
            X.append(i)
            Y.append(j)
            Z.append(m[i,j])
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X, Y, Z)
    plt.show()
```
true
true
f7245a27daac0be722083cf7badfe71aa73117d0
110
py
Python
torsionfit/tests/test_import.py
ChayaSt/torsionfit
0b810b6da4a930b13c3ab8f8b700c6834824173b
[ "MIT" ]
14
2015-10-09T15:46:09.000Z
2020-11-25T15:30:28.000Z
torsionfit/tests/test_import.py
ChayaSt/torsionfit
0b810b6da4a930b13c3ab8f8b700c6834824173b
[ "MIT" ]
25
2015-08-28T02:09:08.000Z
2019-08-06T19:29:41.000Z
torsionfit/tests/test_import.py
ChayaSt/torsionfit
0b810b6da4a930b13c3ab8f8b700c6834824173b
[ "MIT" ]
3
2015-05-14T19:34:03.000Z
2015-07-27T21:46:36.000Z
```python
#!/usr/bin/python

def test_import():
    """ Testing import of torsionfit. """
    import torsionfit
```
13.75
33
0.609091
```python
def test_import():
    import torsionfit
```
true
true
f7245a5a8dda4747976414d44dfdefe86c9c2717
1,861
py
Python
pyvoqc/cirq/voqc_optimization.py
akshajgaur/pyvoqc
6352d64542be5fed72e7cae941d4a2a7db012a4f
[ "MIT" ]
1
2021-11-17T10:50:50.000Z
2021-11-17T10:50:50.000Z
pyvoqc/cirq/voqc_optimization.py
akshajgaur/pyvoqc
6352d64542be5fed72e7cae941d4a2a7db012a4f
[ "MIT" ]
5
2021-03-14T20:13:25.000Z
2021-04-10T01:15:05.000Z
pyvoqc/cirq/voqc_optimization.py
akshajgaur/pyvoqc
6352d64542be5fed72e7cae941d4a2a7db012a4f
[ "MIT" ]
2
2021-03-13T16:16:34.000Z
2022-01-27T19:28:15.000Z
```python
from cirq import circuits, ops, protocols
import cirq
from cirq.contrib.qasm_import import circuit_from_qasm, qasm
import re
import os
from cirq import decompose
from cirq.circuits import Circuit
from pyvoqc.formatting.format_from_qasm import format_from_qasm
from pyvoqc.formatting.rzq_to_rz import rzq_to_rz
from pyvoqc.voqc import VOQC
from pyvoqc.exceptions import InvalidVOQCFunction,InvalidVOQCGate
from pyvoqc.cirq.decompose_cirq_gates import *

class CqVOQC:
    def __init__(self, func = None):
        self.functions = ["optimize", "not_propagation", "cancel_single_qubit_gates",
                          "cancel_two_qubit_gates", "hadamard_reduction", "merge_rotations"]
        self.func = func if func else ["optimize"]
        for i in range(len(self.func)):
            if ((self.func[i] in self.functions) == False):
                raise InvalidVOQCFunction(str(self.func[i]), self.functions)

    def optimize_circuit(self, circuit: circuits.Circuit):
        #Write qasm file from circuit
        circuit = Circuit(decompose(circuit, intercepting_decomposer=decompose_library,keep=need_to_keep))
        qasm_str = cirq.qasm(circuit)
        f = open("temp.qasm", "w")
        f.write(qasm_str)
        f.close()

        #Call VOQC optimizations from input list and go from rzq to rz
        t = self.function_call("temp.qasm")
        rzq_to_rz("temp2.qasm")

        #Get Cirq Circuit from qasm file
        with open("temp2.qasm", "r") as f:
            c = f.read()
        circ = circuit_from_qasm(c)

        #Remove temporary files
        os.remove("temp.qasm")
        os.remove("temp2.qasm")
        return circ

    def function_call(self,fname_in):
        a = VOQC(fname_in, False)
        for i in range(len(self.func)):
            call = getattr(a,self.func[i])
            call()
        return a.write("temp2.qasm")
```
36.490196
152
0.661472
```python
from cirq import circuits, ops, protocols
import cirq
from cirq.contrib.qasm_import import circuit_from_qasm, qasm
import re
import os
from cirq import decompose
from cirq.circuits import Circuit
from pyvoqc.formatting.format_from_qasm import format_from_qasm
from pyvoqc.formatting.rzq_to_rz import rzq_to_rz
from pyvoqc.voqc import VOQC
from pyvoqc.exceptions import InvalidVOQCFunction,InvalidVOQCGate
from pyvoqc.cirq.decompose_cirq_gates import *

class CqVOQC:
    def __init__(self, func = None):
        self.functions = ["optimize", "not_propagation", "cancel_single_qubit_gates",
                          "cancel_two_qubit_gates", "hadamard_reduction", "merge_rotations"]
        self.func = func if func else ["optimize"]
        for i in range(len(self.func)):
            if ((self.func[i] in self.functions) == False):
                raise InvalidVOQCFunction(str(self.func[i]), self.functions)

    def optimize_circuit(self, circuit: circuits.Circuit):
        circuit = Circuit(decompose(circuit, intercepting_decomposer=decompose_library,keep=need_to_keep))
        qasm_str = cirq.qasm(circuit)
        f = open("temp.qasm", "w")
        f.write(qasm_str)
        f.close()

        t = self.function_call("temp.qasm")
        rzq_to_rz("temp2.qasm")

        with open("temp2.qasm", "r") as f:
            c = f.read()
        circ = circuit_from_qasm(c)

        os.remove("temp.qasm")
        os.remove("temp2.qasm")
        return circ

    def function_call(self,fname_in):
        a = VOQC(fname_in, False)
        for i in range(len(self.func)):
            call = getattr(a,self.func[i])
            call()
        return a.write("temp2.qasm")
```
true
true
f7245abaa8a5cb38551388a2aee2cf80e1403e41
12,195
py
Python
Scripts/plot_ProfileVar_Monthly_FDR.py
zmlabe/StratoVari
c5549f54482a2b05e89bded3e3b0b3c9faa686f3
[ "MIT" ]
4
2019-11-23T19:44:21.000Z
2020-02-20T16:54:45.000Z
Scripts/plot_ProfileVar_Monthly_FDR.py
zmlabe/StratoVari
c5549f54482a2b05e89bded3e3b0b3c9faa686f3
[ "MIT" ]
null
null
null
Scripts/plot_ProfileVar_Monthly_FDR.py
zmlabe/StratoVari
c5549f54482a2b05e89bded3e3b0b3c9faa686f3
[ "MIT" ]
2
2019-06-21T19:27:55.000Z
2021-02-12T19:13:22.000Z
""" Plot vertical plots of PAMIP data for each month from November to April using the ensemble mean (300) Notes ----- Author : Zachary Labe Date : 26 June 2019 """ ### Import modules import numpy as np import matplotlib.pyplot as plt import datetime import read_MonthlyData as MO import statsmodels.stats.multitest as fdr import cmocean import itertools ### Define directories directorydata = '/seley/zlabe/simu/' directoryfigure = '/home/zlabe/Desktop/STRATOVARI/' #directoryfigure = '/home/zlabe/Documents/Research/SITperturb/Figures/' ### Define time now = datetime.datetime.now() currentmn = str(now.month) currentdy = str(now.day) currentyr = str(now.year) currenttime = currentmn + '_' + currentdy + '_' + currentyr titletime = currentmn + '/' + currentdy + '/' + currentyr print('\n' '----Plotting Monthly Vertical Profiles- %s----' % titletime) ### Alott time series (300 ensemble members) year1 = 1701 year2 = 2000 years = np.arange(year1,year2+1,1) ############################################################################### ############################################################################### ############################################################################### ### Call arguments varnames = ['U','GEOP','TEMP','V','EGR'] def calc_indttestfdr(varx,vary): """ Function calculates statistical difference for 2 independent sample t-test Parameters ---------- varx : 3d array vary : 3d array Returns ------- stat = calculated t-statistic pvalue = two-tailed p-value Usage ----- stat,pvalue = calc_ttest(varx,vary) """ print('\n>>> Using calc_ttest function!') ### Import modules import scipy.stats as sts ### 2-independent sample t-test stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit') print('*Completed: Finished calc_ttest function!') return stat,pvalue ###################### def readDataPeriods(varnames,sliceq): ### Call function for 4d variable data lat,lon,lev,varfuture = MO.readExperiAll(varnames,'Future','profile') lat,lon,lev,varpast = MO.readExperiAll(varnames,'Past','profile') ### Select ensemble mean period if sliceq == 'Mean': varfuture = varfuture[:,:,:,:,:] varpast = varpast[:,:,:,:,:] elif sliceq == 'A': varfuture = varfuture[:100,:,:,:,:] varpast = varpast[:100,:,:,:,:] elif sliceq == 'B': varfuture = varfuture[100:200,:,:,:,:] varpast = varpast[100:200,:,:,:,:] elif sliceq == 'C': varfuture = varfuture[200:,:,:,:,:] varpast = varpast[200:,:,:,:,:] ### Create 2d array of latitude and longitude lon2,lat2 = np.meshgrid(lon,lat) ### Remove missing data varfuture[np.where(varfuture <= -1e10)] = np.nan varpast[np.where(varpast <= -1e10)] = np.nan ### Rearrange months (N,D,J,F,M,A) varfuturem = np.append(varfuture[:,-2:,:,:,:],varfuture[:,:4,:,:,:], axis=1) varpastm = np.append(varpast[:,-2:,:,:,:],varpast[:,:4,:,:,:],axis=1) ### Calculate zonal means varfuturemz = np.nanmean(varfuturem,axis=4) varpastmz = np.nanmean(varpastm,axis=4) ### Calculate anomalies anompi = varfuturemz - varpastmz ### Calculate ensemble mean anompim = np.nanmean(anompi,axis=0) zdiffruns = anompim ### Calculate climatologies zclimo = np.nanmean(varpastmz,axis=0) ### Calculate significance for each month stat_past = np.empty((varpastm.shape[1],len(lev),len(lat))) pvalue_past = np.empty((varpastm.shape[1],len(lev),len(lat))) for i in range(varpastm.shape[1]): stat_past[i],pvalue_past[i] = calc_indttestfdr(varfuturemz[:,i,:,:], varpastmz[:,i,:,:]) ### Ravel into month x all p values prunsr = np.reshape(pvalue_past, (pvalue_past.shape[0],pvalue_past.shape[1] \ * pvalue_past.shape[2])) ### Calculate 
false discovery rate prunsq = np.empty((prunsr.shape)) prunsq.fill(np.nan) prunsqq = np.empty((prunsr.shape[1])) prunsqq.fill(np.nan) for i in range(prunsr.shape[0]): ### Check for nans before correction!! mask = np.isfinite(prunsr[i,:]) prunsrr = prunsr[i,:] score,prunsqq[mask] = fdr.fdrcorrection(prunsrr[mask],alpha=0.05, method='indep') prunsq[i,:] = prunsqq ### Reshape into month x lat x lon pruns = np.reshape(prunsq,(pvalue_past.shape)) ### Mask variables by their adjusted p-values pruns[np.where(pruns >= 0.05)] = np.nan pruns[np.where(pruns < 0.05)] = 1. pruns[np.where(np.isnan(pruns))] = 0. return zdiffruns,zclimo,pruns,lat,lon,lev ########################################################################### ########################################################################### ########################################################################### ### Read in data for v in range(len(varnames)): diffm,climom,pvalm,lat,lon,lev = readDataPeriods(varnames[v],'Mean') diffa,climoa,pvala,lat,lon,lev = readDataPeriods(varnames[v],'A') diffb,climob,pvalb,lat,lon,lev = readDataPeriods(varnames[v],'B') diffc,climoc,pvalc,lat,lon,lev = readDataPeriods(varnames[v],'C') varn = list(itertools.chain(*[diffm,diffa,diffb,diffc])) zclimo = list(itertools.chain(*[climom,climoa,climob,climoc])) pvarn = list(itertools.chain(*[pvalm,pvala,pvalb,pvalc])) ### Plot Variables plt.rc('text',usetex=True) plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) ### Set limits for contours and colorbars if varnames[v] == 'U': limit = np.arange(-2,2.1,0.1) barlim = np.arange(-2,3,1) elif varnames[v] == 'TEMP': limit = np.arange(-4,4.1,0.2) barlim = np.arange(-4,5,1) elif varnames[v] == 'GEOP': limit = np.arange(-60,61,2) barlim = np.arange(-60,61,30) elif varnames[v] == 'V': limit = np.arange(-0.2,0.21,0.02) barlim = np.arange(-0.2,0.3,0.1) elif varnames[v] == 'EGR': limit = np.arange(-0.08,0.081,0.005) barlim = np.arange(-0.08,0.09,0.04) zscale = np.array([1000,700,500,300,200, 100,50,30,10]) latq,levq = np.meshgrid(lat,lev) fig = plt.figure() for i in range(len(varn)): ax1 = plt.subplot(4,6,i+1) ax1.spines['top'].set_color('dimgrey') ax1.spines['right'].set_color('dimgrey') ax1.spines['bottom'].set_color('dimgrey') ax1.spines['left'].set_color('dimgrey') ax1.spines['left'].set_linewidth(2) ax1.spines['bottom'].set_linewidth(2) ax1.spines['right'].set_linewidth(2) ax1.spines['top'].set_linewidth(2) ax1.tick_params(axis='y',direction='out',which='major',pad=3, width=2,color='dimgrey') ax1.tick_params(axis='x',direction='out',which='major',pad=3, width=2,color='dimgrey') cs = plt.contourf(lat,lev,varn[i]*pvarn[i],limit,extend='both') if varnames[v] == 'U': cs2 = plt.contour(lat,lev,zclimo[i],np.arange(-20,101,5), linewidths=0.5,colors='dimgrey') plt.gca().invert_yaxis() plt.yscale('log',nonposy='clip') plt.xticks(np.arange(0,96,30),map(str,np.arange(0,91,30)),fontsize=5) plt.yticks(zscale,map(str,zscale),ha='right',fontsize=5) plt.minorticks_off() plt.xlim([0,90]) plt.ylim([1000,10]) if any([i==0,i==6,i==12,i==18]): ax1.tick_params(labelleft='on') else: ax1.tick_params(labelleft='off') if i < 18: ax1.tick_params(labelbottom='off') if any([i==0,i==6,i==12]): ax1.tick_params(axis='y',direction='out',which='major',pad=3, width=2,color='dimgrey') ax1.tick_params(axis='x',direction='out',which='major',pad=3, width=0,color='dimgrey') else: if i < 24 and i != 18: ax1.tick_params(axis='y',direction='out',which='major',pad=3, width=0,color='dimgrey') if i < 18: 
ax1.tick_params(axis='y',direction='out',which='major', pad=3,width=0,color='dimgrey') ax1.tick_params(axis='x',direction='out',which='major', pad=3,width=0,color='dimgrey') if varnames[v] == 'U': cmap = cmocean.cm.balance cs.set_cmap(cmap) elif varnames[v] == 'TEMP': cmap = cmocean.cm.balance cs.set_cmap(cmap) elif varnames[v] == 'GEOP': cmap = cmocean.cm.balance cs.set_cmap(cmap) elif varnames[v] == 'V': cmap = cmocean.cm.balance cs.set_cmap(cmap) elif varnames[v] == 'EGR': cmap = cmocean.cm.diff cs.set_cmap(cmap) labelmonths = [r'NOV',r'DEC',r'JAN',r'FEB',r'MAR',r'APR'] if i < 6: ax1.annotate(r'\textbf{%s}' % labelmonths[i], xy=(0, 0),xytext=(0.5,1.13),xycoords='axes fraction', fontsize=13,color='dimgrey',rotation=0, ha='center',va='center') if i==0: plt.annotate(r'\textbf{Mean}', xy=(0, 0),xytext=(-0.6,0.5),xycoords='axes fraction', fontsize=15,color='k',rotation=90, ha='center',va='center') elif i==6: plt.annotate(r'\textbf{A}', xy=(0, 0),xytext=(-0.6,0.5),xycoords='axes fraction', fontsize=15,color='k',rotation=90, ha='center',va='center') elif i==12: plt.annotate(r'\textbf{B}', xy=(0, 0),xytext=(-0.6,0.5),xycoords='axes fraction', fontsize=15,color='k',rotation=90, ha='center',va='center') elif i==18: plt.annotate(r'\textbf{C}', xy=(0, 0),xytext=(-0.6,0.5),xycoords='axes fraction', fontsize=15,color='k',rotation=90, ha='center',va='center') cbar_ax = fig.add_axes([0.312,0.07,0.4,0.02]) cbar = fig.colorbar(cs,cax=cbar_ax,orientation='horizontal', extend='both',extendfrac=0.07,drawedges=False) if varnames[v] == 'U': cbar.set_label(r'\textbf{m/s}',fontsize=9,color='dimgray', labelpad=0) elif varnames[v] == 'TEMP': cbar.set_label(r'\textbf{$^\circ$C}',fontsize=9,color='dimgray', labelpad=0) elif varnames[v] == 'GEOP': cbar.set_label(r'\textbf{m}',fontsize=9,color='dimgray', labelpad=0) elif varnames[v] == 'V': cbar.set_label(r'\textbf{m/s}',fontsize=9,color='dimgray', labelpad=0) elif varnames[v] == 'EGR': cbar.set_label(r'\textbf{1/day}',fontsize=9,color='dimgray', labelpad=0) cbar.set_ticks(barlim) cbar.set_ticklabels(list(map(str,barlim))) cbar.ax.tick_params(axis='x', size=.01) cbar.outline.set_edgecolor('dimgrey') cbar.outline.set_linewidth(0.5) cbar.ax.tick_params(labelsize=6) plt.annotate(r'\textbf{Latitude ($^{\circ}$N)', xy=(0, 0),xytext=(0.515,0.12),xycoords='figure fraction', fontsize=6,color='k',rotation=0, ha='center',va='center') plt.subplots_adjust(hspace=0.1,bottom=0.17,top=0.93,wspace=0.1) plt.savefig(directoryfigure + '%s_MonthlyProfiles_100yr_FDR.png' % varnames[v], dpi=300) print('Completed: Script done!')
36.8429
83
0.526117
tlib.pyplot as plt import datetime import read_MonthlyData as MO import statsmodels.stats.multitest as fdr import cmocean import itertools ectoryfigure = '/home/zlabe/Desktop/STRATOVARI/' w.month) currentdy = str(now.day) currentyr = str(now.year) currenttime = currentmn + '_' + currentdy + '_' + currentyr titletime = currentmn + '/' + currentdy + '/' + currentyr print('\n' '----Plotting Monthly Vertical Profiles- %s----' % titletime)
true
true
f7245c8c7d5a9be02f2a22ebc20f3d4370dc8234
1,216
py
Python
package.py
rittikaadhikari/stock-recommendation
1f14276a955301b1c6fa1c00bd88b00cf5668d8c
[ "MIT" ]
null
null
null
package.py
rittikaadhikari/stock-recommendation
1f14276a955301b1c6fa1c00bd88b00cf5668d8c
[ "MIT" ]
null
null
null
package.py
rittikaadhikari/stock-recommendation
1f14276a955301b1c6fa1c00bd88b00cf5668d8c
[ "MIT" ]
null
null
null
```python
# Inspired by npm's package.json file

name = 'hisa'
version = '0.1.0'
release = '0.1.0'

description = 'A stock market predictor and model builder'
long_description = ['README.md']
keywords = ['neural', 'network', 'machine', 'deep', 'learning',
            'tensorflow', 'stock', 'market', 'prediction']

authors = [
    { 'name': 'Rittika Adhikari', 'email': 'rittika.adhikari@gmail.com' },
    { 'name': 'Sahil Modi', 'email': 'sm34524@gmail.com'},
    { 'name': 'Utkarsh Awasthi', 'email': 'navamawasthi@gmail.com'}
]

maintainers = [
    { 'name': 'Rittika Adhikari', 'email': 'rittika.adhikari@gmail.com' },
    { 'name': 'Sahil Modi', 'email': 'sm34524@gmail.com'},
    { 'name': 'Utkarsh Awasthi', 'email': 'navamawasthi@gmail.com'}
]

license = 'MIT'

modules = [
    'hisa',
    'hisa.config',
    'hisa._util',
    'hisa.capsule',
    'hisa.learn',
    'hisa.learn.models',
    'hisa.learn.sentiment',
]

github_username = 'rittikaadhikari'
github_repository = 'hisa'
github_url = '{baseurl}/{username}/{repository}'.format(
    baseurl = 'https://github.com',
    username = github_username,
    repository = github_repository)
```
34.742857
74
0.591283
```python
name = 'hisa'
version = '0.1.0'
release = '0.1.0'

description = 'A stock market predictor and model builder'
long_description = ['README.md']
keywords = ['neural', 'network', 'machine', 'deep', 'learning',
            'tensorflow', 'stock', 'market', 'prediction']

authors = [
    { 'name': 'Rittika Adhikari', 'email': 'rittika.adhikari@gmail.com' },
    { 'name': 'Sahil Modi', 'email': 'sm34524@gmail.com'},
    { 'name': 'Utkarsh Awasthi', 'email': 'navamawasthi@gmail.com'}
]

maintainers = [
    { 'name': 'Rittika Adhikari', 'email': 'rittika.adhikari@gmail.com' },
    { 'name': 'Sahil Modi', 'email': 'sm34524@gmail.com'},
    { 'name': 'Utkarsh Awasthi', 'email': 'navamawasthi@gmail.com'}
]

license = 'MIT'

modules = [
    'hisa',
    'hisa.config',
    'hisa._util',
    'hisa.capsule',
    'hisa.learn',
    'hisa.learn.models',
    'hisa.learn.sentiment',
]

github_username = 'rittikaadhikari'
github_repository = 'hisa'
github_url = '{baseurl}/{username}/{repository}'.format(
    baseurl = 'https://github.com',
    username = github_username,
    repository = github_repository)
```
true
true
f7245ff14c37b2aedccbcae8d5af768476c65b57
587
py
Python
setup.py
dimitri-yatsenko/adamacs
c83ce744d207fb5fa3e7069a15cff4a52b9dcf52
[ "MIT" ]
null
null
null
setup.py
dimitri-yatsenko/adamacs
c83ce744d207fb5fa3e7069a15cff4a52b9dcf52
[ "MIT" ]
null
null
null
setup.py
dimitri-yatsenko/adamacs
c83ce744d207fb5fa3e7069a15cff4a52b9dcf52
[ "MIT" ]
null
null
null
```python
from setuptools import setup

with open('README.md') as f:
    readme = f.read()

with open('LICENSE') as f:
    license = f.read()

setup(
    name='adamacs',
    version='0.0.1',
    description='Architectures for Data Management and Computational Support.',
    long_description=readme,
    author='Daniel Müller-Komorowska',
    author_email='danielmuellermsc@gmail.com',
    url='https://github.com/SFB1089/adamacs.git',
    license=license,
    packages=['adamacs'],
    install_requires=[
        'numpy',
        'pandas',
        'matplotlib',
        'scipy'
    ])
```
22.576923
79
0.623509
```python
from setuptools import setup

with open('README.md') as f:
    readme = f.read()

with open('LICENSE') as f:
    license = f.read()

setup(
    name='adamacs',
    version='0.0.1',
    description='Architectures for Data Management and Computational Support.',
    long_description=readme,
    author='Daniel Müller-Komorowska',
    author_email='danielmuellermsc@gmail.com',
    url='https://github.com/SFB1089/adamacs.git',
    license=license,
    packages=['adamacs'],
    install_requires=[
        'numpy',
        'pandas',
        'matplotlib',
        'scipy'
    ])
```
true
true
f7246097ee00e1071047af06705d6ed6469d67cb
7,066
py
Python
tensorflow/contrib/keras/python/keras/models_test.py
DEVESHTARASIA/tensorflow
d3edb8c60ed4fd831d62833ed22f5c23486c561c
[ "Apache-2.0" ]
384
2017-02-21T18:38:04.000Z
2022-02-22T07:30:25.000Z
tensorflow/contrib/keras/python/keras/models_test.py
DEVESHTARASIA/tensorflow
d3edb8c60ed4fd831d62833ed22f5c23486c561c
[ "Apache-2.0" ]
15
2017-03-01T20:18:43.000Z
2020-05-07T10:33:51.000Z
tensorflow/contrib/keras/python/keras/models_test.py
DEVESHTARASIA/tensorflow
d3edb8c60ed4fd831d62833ed22f5c23486c561c
[ "Apache-2.0" ]
81
2017-02-21T19:31:19.000Z
2022-02-22T07:30:24.000Z
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for training routines.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile import numpy as np from tensorflow.contrib.keras.python import keras from tensorflow.python.platform import test try: import h5py # pylint:disable=g-import-not-at-top except ImportError: h5py = None class TestModelSaving(test.TestCase): def test_sequential_model_saving(self): if h5py is None: return # Skip test if models cannot be saved. with self.test_session(): model = keras.models.Sequential() model.add(keras.layers.Dense(2, input_shape=(3,))) model.add(keras.layers.RepeatVector(3)) model.add(keras.layers.TimeDistributed(keras.layers.Dense(3))) model.compile(loss=keras.losses.MSE, optimizer=keras.optimizers.RMSprop(lr=0.0001), metrics=[keras.metrics.categorical_accuracy], sample_weight_mode='temporal') x = np.random.random((1, 3)) y = np.random.random((1, 3, 3)) model.train_on_batch(x, y) out = model.predict(x) _, fname = tempfile.mkstemp('.h5') keras.models.save_model(model, fname) new_model = keras.models.load_model(fname) os.remove(fname) out2 = new_model.predict(x) self.assertAllClose(out, out2, atol=1e-05) # test that new updates are the same with both models x = np.random.random((1, 3)) y = np.random.random((1, 3, 3)) model.train_on_batch(x, y) new_model.train_on_batch(x, y) out = model.predict(x) out2 = new_model.predict(x) self.assertAllClose(out, out2, atol=1e-05) def test_sequential_model_saving_2(self): if h5py is None: return # Skip test if models cannot be saved. with self.test_session(): # test with custom optimizer, loss class CustomOp(keras.optimizers.RMSprop): pass def custom_loss(y_true, y_pred): return keras.losses.mse(y_true, y_pred) model = keras.models.Sequential() model.add(keras.layers.Dense(2, input_shape=(3,))) model.add(keras.layers.Dense(3)) model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc']) x = np.random.random((1, 3)) y = np.random.random((1, 3)) model.train_on_batch(x, y) out = model.predict(x) _, fname = tempfile.mkstemp('.h5') keras.models.save_model(model, fname) model = keras.models.load_model( fname, custom_objects={'CustomOp': CustomOp, 'custom_loss': custom_loss}) os.remove(fname) out2 = model.predict(x) self.assertAllClose(out, out2, atol=1e-05) def test_functional_model_saving(self): if h5py is None: return # Skip test if models cannot be saved. 
with self.test_session(): inputs = keras.layers.Input(shape=(3,)) x = keras.layers.Dense(2)(inputs) output = keras.layers.Dense(3)(x) model = keras.models.Model(inputs, output) model.compile(loss=keras.losses.MSE, optimizer=keras.optimizers.RMSprop(lr=0.0001), metrics=[keras.metrics.categorical_accuracy]) x = np.random.random((1, 3)) y = np.random.random((1, 3)) model.train_on_batch(x, y) out = model.predict(x) _, fname = tempfile.mkstemp('.h5') keras.models.save_model(model, fname) model = keras.models.load_model(fname) os.remove(fname) out2 = model.predict(x) self.assertAllClose(out, out2, atol=1e-05) def test_saving_without_compilation(self): if h5py is None: return # Skip test if models cannot be saved. with self.test_session(): model = keras.models.Sequential() model.add(keras.layers.Dense(2, input_shape=(3,))) model.add(keras.layers.Dense(3)) model.compile(loss='mse', optimizer='sgd', metrics=['acc']) _, fname = tempfile.mkstemp('.h5') keras.models.save_model(model, fname) model = keras.models.load_model(fname) os.remove(fname) def test_saving_right_after_compilation(self): if h5py is None: return # Skip test if models cannot be saved. with self.test_session(): model = keras.models.Sequential() model.add(keras.layers.Dense(2, input_shape=(3,))) model.add(keras.layers.Dense(3)) model.compile(loss='mse', optimizer='sgd', metrics=['acc']) model.model._make_train_function() _, fname = tempfile.mkstemp('.h5') keras.models.save_model(model, fname) model = keras.models.load_model(fname) os.remove(fname) def test_saving_lambda_numpy_array_arguments(self): if h5py is None: return # Skip test if models cannot be saved. mean = np.random.random((4, 2, 3)) std = np.abs(np.random.random((4, 2, 3))) + 1e-5 inputs = keras.layers.Input(shape=(4, 2, 3)) output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std, arguments={'mu': mean, 'std': std})(inputs) model = keras.models.Model(inputs, output) model.compile(loss='mse', optimizer='sgd', metrics=['acc']) _, fname = tempfile.mkstemp('.h5') keras.models.save_model(model, fname) model = keras.models.load_model(fname) os.remove(fname) self.assertAllClose(mean, model.layers[1].arguments['mu']) self.assertAllClose(std, model.layers[1].arguments['std']) class TestSequential(test.TestCase): """Most Sequential model API tests are covered in `training_test.py`. """ def test_sequential_pop(self): num_hidden = 5 input_dim = 3 batch_size = 5 num_classes = 2 with self.test_session(): model = keras.models.Sequential() model.add(keras.layers.Dense(num_hidden, input_dim=input_dim)) model.add(keras.layers.Dense(num_classes)) model.compile(loss='mse', optimizer='sgd') x = np.random.random((batch_size, input_dim)) y = np.random.random((batch_size, num_classes)) model.fit(x, y, epochs=1) model.pop() self.assertEqual(len(model.layers), 1) self.assertEqual(model.output_shape, (None, num_hidden)) model.compile(loss='mse', optimizer='sgd') y = np.random.random((batch_size, num_hidden)) model.fit(x, y, epochs=1) if __name__ == '__main__': test.main()
32.865116
80
0.649731
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import tempfile

import numpy as np

from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test

try:
  import h5py
except ImportError:
  h5py = None


class TestModelSaving(test.TestCase):

  def test_sequential_model_saving(self):
    if h5py is None:
      return

    with self.test_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.RepeatVector(3))
      model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
      model.compile(loss=keras.losses.MSE,
                    optimizer=keras.optimizers.RMSprop(lr=0.0001),
                    metrics=[keras.metrics.categorical_accuracy],
                    sample_weight_mode='temporal')
      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      _, fname = tempfile.mkstemp('.h5')
      keras.models.save_model(model, fname)

      new_model = keras.models.load_model(fname)
      os.remove(fname)

      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

      x = np.random.random((1, 3))
      y = np.random.random((1, 3, 3))
      model.train_on_batch(x, y)
      new_model.train_on_batch(x, y)
      out = model.predict(x)
      out2 = new_model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

  def test_sequential_model_saving_2(self):
    if h5py is None:
      return

    with self.test_session():

      class CustomOp(keras.optimizers.RMSprop):
        pass

      def custom_loss(y_true, y_pred):
        return keras.losses.mse(y_true, y_pred)

      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])

      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      _, fname = tempfile.mkstemp('.h5')
      keras.models.save_model(model, fname)

      model = keras.models.load_model(
          fname,
          custom_objects={'CustomOp': CustomOp,
                          'custom_loss': custom_loss})
      os.remove(fname)

      out2 = model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

  def test_functional_model_saving(self):
    if h5py is None:
      return

    with self.test_session():
      inputs = keras.layers.Input(shape=(3,))
      x = keras.layers.Dense(2)(inputs)
      output = keras.layers.Dense(3)(x)

      model = keras.models.Model(inputs, output)
      model.compile(loss=keras.losses.MSE,
                    optimizer=keras.optimizers.RMSprop(lr=0.0001),
                    metrics=[keras.metrics.categorical_accuracy])
      x = np.random.random((1, 3))
      y = np.random.random((1, 3))
      model.train_on_batch(x, y)

      out = model.predict(x)
      _, fname = tempfile.mkstemp('.h5')
      keras.models.save_model(model, fname)

      model = keras.models.load_model(fname)
      os.remove(fname)

      out2 = model.predict(x)
      self.assertAllClose(out, out2, atol=1e-05)

  def test_saving_without_compilation(self):
    if h5py is None:
      return

    with self.test_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

      _, fname = tempfile.mkstemp('.h5')
      keras.models.save_model(model, fname)
      model = keras.models.load_model(fname)
      os.remove(fname)

  def test_saving_right_after_compilation(self):
    if h5py is None:
      return

    with self.test_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(2, input_shape=(3,)))
      model.add(keras.layers.Dense(3))
      model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
      model.model._make_train_function()

      _, fname = tempfile.mkstemp('.h5')
      keras.models.save_model(model, fname)
      model = keras.models.load_model(fname)
      os.remove(fname)

  def test_saving_lambda_numpy_array_arguments(self):
    if h5py is None:
      return

    mean = np.random.random((4, 2, 3))
    std = np.abs(np.random.random((4, 2, 3))) + 1e-5
    inputs = keras.layers.Input(shape=(4, 2, 3))
    output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
                                 arguments={'mu': mean, 'std': std})(inputs)
    model = keras.models.Model(inputs, output)
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    _, fname = tempfile.mkstemp('.h5')
    keras.models.save_model(model, fname)

    model = keras.models.load_model(fname)
    os.remove(fname)

    self.assertAllClose(mean, model.layers[1].arguments['mu'])
    self.assertAllClose(std, model.layers[1].arguments['std'])


class TestSequential(test.TestCase):

  def test_sequential_pop(self):
    num_hidden = 5
    input_dim = 3
    batch_size = 5
    num_classes = 2

    with self.test_session():
      model = keras.models.Sequential()
      model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
      model.add(keras.layers.Dense(num_classes))
      model.compile(loss='mse', optimizer='sgd')
      x = np.random.random((batch_size, input_dim))
      y = np.random.random((batch_size, num_classes))
      model.fit(x, y, epochs=1)
      model.pop()
      self.assertEqual(len(model.layers), 1)
      self.assertEqual(model.output_shape, (None, num_hidden))
      model.compile(loss='mse', optimizer='sgd')
      y = np.random.random((batch_size, num_hidden))
      model.fit(x, y, epochs=1)


if __name__ == '__main__':
  test.main()
true
true
f72460e93b5cc9a2ba178d43a0407e7595709d8c
399
py
Python
run.py
palazzem/gello
19fe9e4aa8de485dd829a87047ec64f89b5fa7ee
[ "Apache-2.0" ]
44
2018-03-28T14:22:23.000Z
2022-03-15T07:25:06.000Z
run.py
palazzem/gello
19fe9e4aa8de485dd829a87047ec64f89b5fa7ee
[ "Apache-2.0" ]
44
2018-03-28T14:19:03.000Z
2022-02-16T10:24:57.000Z
run.py
palazzem/gello
19fe9e4aa8de485dd829a87047ec64f89b5fa7ee
[ "Apache-2.0" ]
12
2018-03-28T14:15:43.000Z
2021-07-19T17:33:20.000Z
# -*- coding: utf-8 -*-
#
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache 2 License.
#
# This product includes software developed at Datadog
# (https://www.datadoghq.com/).
#
# Copyright 2018 Datadog, Inc.
#
"""run.py

Run the application locally by running: `python run.py`
"""
from app import app


if __name__ == '__main__':
    app.run()
16.625
78
0.689223
from app import app


if __name__ == '__main__':
    app.run()
true
true
f7246108e996bd99236ce11f115ac45903250e54
399
py
Python
code/poc/emergency.py
a10pepo/parrot_ar_drone
af3c15379772a7e86082957776ed8c39193170ec
[ "MIT" ]
null
null
null
code/poc/emergency.py
a10pepo/parrot_ar_drone
af3c15379772a7e86082957776ed8c39193170ec
[ "MIT" ]
3
2021-06-08T20:51:39.000Z
2022-03-12T00:13:08.000Z
code/poc/emergency.py
a10pepo/parrot_ar_drone
af3c15379772a7e86082957776ed8c39193170ec
[ "MIT" ]
1
2020-02-03T16:24:38.000Z
2020-02-03T16:24:38.000Z
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 18 18:19:15 2019

@author: pepo
"""
import libardrone
import pygame
from time import sleep
import time
import cv2

drone = libardrone.ARDrone()


def operation(sleep):
    t1 = time.time()
    t2=t1
    while t2-t1<sleep:
        drone.turn_left()
        t2=time.time()


def main():
    drone.land()


if __name__ == '__main__':
    main()
12.090909
35
0.606516
import libardrone
import pygame
from time import sleep
import time
import cv2

drone = libardrone.ARDrone()


def operation(sleep):
    t1 = time.time()
    t2=t1
    while t2-t1<sleep:
        drone.turn_left()
        t2=time.time()


def main():
    drone.land()


if __name__ == '__main__':
    main()
true
true
f724637d889f2486a08417116366b6d3968dc1b4
503
py
Python
ports/gprs_a9/examples/example_31_ssl.py
sebi5361/micropython
6c054cd124bc6229bee127128264dc0829dea53c
[ "MIT" ]
79
2019-02-07T09:04:50.000Z
2022-02-20T06:54:44.000Z
ports/gprs_a9/examples/example_31_ssl.py
sebi5361/micropython
6c054cd124bc6229bee127128264dc0829dea53c
[ "MIT" ]
100
2019-05-16T09:25:23.000Z
2021-09-20T07:46:54.000Z
ports/gprs_a9/examples/example_31_ssl.py
sebi5361/micropython
6c054cd124bc6229bee127128264dc0829dea53c
[ "MIT" ]
25
2019-03-20T08:16:57.000Z
2022-03-11T17:59:36.000Z
# Micropython a9g example
# Source: https://github.com/pulkin/micropython
# Author: pulkin
# Demonstrates how to wrap sockets into ssl tunnel

import cellular
import socket
import ssl

cellular.gprs("internet", "", "")
print("IP", socket.get_local_ip())

host = "httpstat.us"
port = 443

s = socket.socket()
s.connect((host, port))
s = ssl.wrap_socket(s)

message = "GET /200 HTTP/1.1\r\nHost: {}\r\nConnection: close\r\n\r\n"
s.write(message.format(host))
print(s.read(256))

s.close()
cellular.gprs(False)
23.952381
70
0.719682
import cellular
import socket
import ssl

cellular.gprs("internet", "", "")
print("IP", socket.get_local_ip())

host = "httpstat.us"
port = 443

s = socket.socket()
s.connect((host, port))
s = ssl.wrap_socket(s)

message = "GET /200 HTTP/1.1\r\nHost: {}\r\nConnection: close\r\n\r\n"
s.write(message.format(host))
print(s.read(256))

s.close()
cellular.gprs(False)
true
true
f7246452a8b54521f9de02de28a010c82e5a7bdd
675
py
Python
script/dbus_mock.py
lingxiao-Zhu/electron
2d85b1f8f527d55f884904dbfdde50ee66a49830
[ "MIT" ]
88,283
2016-04-04T19:29:13.000Z
2022-03-31T23:33:33.000Z
script/dbus_mock.py
lingxiao-Zhu/electron
2d85b1f8f527d55f884904dbfdde50ee66a49830
[ "MIT" ]
27,327
2016-04-04T19:38:58.000Z
2022-03-31T22:34:10.000Z
script/dbus_mock.py
lingxiao-Zhu/electron
2d85b1f8f527d55f884904dbfdde50ee66a49830
[ "MIT" ]
15,972
2016-04-04T19:32:06.000Z
2022-03-31T08:54:00.000Z
#!/usr/bin/env python

import os
import subprocess
import sys

from dbusmock import DBusTestCase

from lib.config import is_verbose_mode


def stop():
    DBusTestCase.stop_dbus(DBusTestCase.system_bus_pid)
    DBusTestCase.stop_dbus(DBusTestCase.session_bus_pid)


def start():
    log = sys.stdout if is_verbose_mode() else open(os.devnull, 'w')
    DBusTestCase.start_system_bus()
    DBusTestCase.spawn_server_template('logind', None, log)
    DBusTestCase.start_session_bus()
    DBusTestCase.spawn_server_template('notification_daemon', None, log)


if __name__ == '__main__':
    start()
    try:
        subprocess.check_call(sys.argv[1:])
    finally:
        stop()
22.5
72
0.734815
import os
import subprocess
import sys

from dbusmock import DBusTestCase

from lib.config import is_verbose_mode


def stop():
    DBusTestCase.stop_dbus(DBusTestCase.system_bus_pid)
    DBusTestCase.stop_dbus(DBusTestCase.session_bus_pid)


def start():
    log = sys.stdout if is_verbose_mode() else open(os.devnull, 'w')
    DBusTestCase.start_system_bus()
    DBusTestCase.spawn_server_template('logind', None, log)
    DBusTestCase.start_session_bus()
    DBusTestCase.spawn_server_template('notification_daemon', None, log)


if __name__ == '__main__':
    start()
    try:
        subprocess.check_call(sys.argv[1:])
    finally:
        stop()
true
true
f724678f5e7ade231a5e84c6bdf82ae0868ec843
40,659
py
Python
applications/admin/languages/he.py
misl6/web2py
4191d4c48c37c66cc7eb293b610a6b6e86870571
[ "BSD-3-Clause" ]
1
2019-09-05T03:54:51.000Z
2019-09-05T03:54:51.000Z
applications/admin/languages/he.py
misl6/web2py
4191d4c48c37c66cc7eb293b610a6b6e86870571
[ "BSD-3-Clause" ]
null
null
null
applications/admin/languages/he.py
misl6/web2py
4191d4c48c37c66cc7eb293b610a6b6e86870571
[ "BSD-3-Clause" ]
1
2019-09-05T03:54:52.000Z
2019-09-05T03:54:52.000Z
# -*- coding: utf-8 -*- { '!langcode!': 'he-il', '!langname!': 'עברית', '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"עדכן" הוא ביטוי אופציונאלי, כגון "field1=newvalue". אינך יוכל להשתמש בjoin, בעת שימוש ב"עדכן" או "מחק".', '"User Exception" debug mode. ': '"User Exception" debug mode. ', '%s': '%s', '%s %%{row} deleted': '%s רשומות נמחקו', '%s %%{row} updated': '%s רשומות עודכנו', '%s selected': '%s selected', '%s students registered': '%s students registered', '%Y-%m-%d': '%Y-%m-%d', '%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', '(requires internet access)': '(requires internet access)', '(requires internet access, experimental)': '(requires internet access, experimental)', '(something like "it-it")': '(למשל "it-it")', '(version %s)': '(version %s)', '?': '?', '@markmin\x01(**%.0d MB**)': '(**%.0d MB**)', '@markmin\x01(file **gluon/contrib/plural_rules/%s.py** is not found)': '(file **gluon/contrib/plural_rules/%s.py** is not found)', '@markmin\x01**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}', '@markmin\x01**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}', '@markmin\x01**not available** (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)': '**not available** (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)', '@markmin\x01``**not available**``:red (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)': '``**not available**``:red (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)', '@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page', '@markmin\x01Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.', '@markmin\x01DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.', '@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})', "@markmin\x01Mercurial Version Control System Interface[[NEWLINE]]for application '%s'": "Mercurial Version Control System Interface[[NEWLINE]]for application '%s'", '@markmin\x01Number of entries: **%s**': 'Number of entries: **%s**', '@markmin\x01Please [[refresh %s]] this page to see if a breakpoint was hit and debug interaction is required.': 'Please [[refresh %s]] this page to see if a breakpoint was hit and debug interaction is required.', '@markmin\x01RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.', '@markmin\x01Searching: **%s** %%{file}': 'Searching: **%s** files', '@markmin\x01You need to set up and reach a [[breakpoint %s]] to use the debugger!': 'You need to set up and reach a [[breakpoint %s]] to use the debugger!', 'A new password was 
emailed to you': 'A new password was emailed to you', 'A new version of web2py is available: %s': 'גירסא חדשה של web2py זמינה: %s', 'Abort': 'Abort', 'About': 'אודות', 'About application': 'אודות אפליקציה', 'Accept Terms': 'Accept Terms', 'Add breakpoint': 'Add breakpoint', 'additional code for your application': 'קוד נוסף עבור האפליקציה שלך', 'Additional code for your application': 'Additional code for your application', 'Admin design page': 'Admin design page', 'admin disabled because no admin password': 'ממשק המנהל מנוטרל כי לא הוגדרה סיסמת מנהל', 'admin disabled because not supported on google app engine': 'ממשק המנהל נוטרל, כי אין תמיכה בGoogle app engine', 'admin disabled because too many invalid login attempts': 'admin disabled because too many invalid login attempts', 'admin disabled because unable to access password file': 'ממשק מנהל נוטרל, כי לא ניתן לגשת לקובץ הסיסמאות', 'Admin is disabled because insecure channel': 'ממשק האדמין נוטרל בשל גישה לא מאובטחת', 'Admin language': 'Admin language', 'Admin versioning page': 'Admin versioning page', 'administrative interface': 'administrative interface', 'Administrator Password:': 'סיסמת מנהל', 'and rename it (required):': 'ושנה את שמו (חובה):', 'and rename it:': 'ושנה את שמו:', 'App does not exist or you are not authorized': 'App does not exist or you are not authorized', 'appadmin': 'מנהל מסד הנתונים', 'appadmin is disabled because insecure channel': 'מנהל מסד הנתונים נוטרל בשל ערוץ לא מאובטח', 'Application': 'Application', 'application "%s" uninstalled': 'אפליקציה "%s" הוסרה', 'Application cannot be generated in demo mode': 'Application cannot be generated in demo mode', 'application compiled': 'אפליקציה קומפלה', 'Application exists already': 'Application exists already', 'application is compiled and cannot be designed': 'לא ניתן לערוך אפליקציה מקומפלת', 'Application name:': 'Application name:', 'Application updated via git pull': 'Application updated via git pull', 'Apply changes': 'Apply changes', 'are not used': 'are not used', 'are not used yet': 'are not used yet', 'Are you sure you want to delete file "%s"?': 'האם אתה בטוח שברצונך למחוק את הקובץ "%s"?', 'Are you sure you want to delete plugin "%s"?': 'האם אתה בטוח שברצונך למחוק את התוסף "%s"?', 'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?', 'Are you sure you want to uninstall application "%s"?': 'האם אתה בטוח שברצונך להסיר את האפליקציה "%s"?', 'Are you sure you want to upgrade web2py now?': 'האם אתה בטוח שאתה רוצה לשדרג את web2py עכשיו?', 'Are you sure?': 'Are you sure?', 'arguments': 'פרמטרים', 'at char %s': 'at char %s', 'at line %s': 'at line %s', 'ATTENTION:': 'ATTENTION:', 'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'לתשומת ליבך: ניתן להתחבר רק בערוץ מאובטח (HTTPS) או מlocalhost', 'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'לתשומת ליבך: אין לערוך מספר בדיקות במקביל, שכן הן עשויות להפריע זו לזו', 'ATTENTION: you cannot edit the running application!': 'לתשומת ליבך: לא ניתן לערוך אפליקציה בזמן הרצתה', 'Authentication code': 'Authentication code', 'Autocomplete Python Code': 'Autocomplete Python Code', 'Available databases and tables': 'מסדי נתונים וטבלאות זמינים', 'Available Databases and Tables': 'Available Databases and Tables', 'back': 'אחורה', 'Back to the plugins list': 'Back to the plugins list', 'Back to wizard': 'Back to wizard', 'Basics': 'Basics', 'Begin': 'Begin', 'breakpoint': 'breakpoint', 'Breakpoints': 'Breakpoints', 'breakpoints': 
'breakpoints', 'Bulk Register': 'Bulk Register', 'Bulk Student Registration': 'Bulk Student Registration', 'Cache': 'Cache', 'cache': 'מטמון', 'Cache Cleared': 'Cache Cleared', 'Cache Keys': 'Cache Keys', 'cache, errors and sessions cleaned': 'מטמון, שגיאות וסשן נוקו', 'can be a git repo': 'can be a git repo', 'Cancel': 'Cancel', 'Cannot be empty': 'אינו יכול להישאר ריק', 'Cannot compile: there are errors in your app:': 'לא ניתן לקמפל: ישנן שגיאות באפליקציה שלך:', 'cannot create file': 'לא מצליח ליצור קובץ', 'cannot upload file "%(filename)s"': 'לא הצלחתי להעלות את הקובץ "%(filename)s"', 'Change Admin Password': 'Change Admin Password', 'Change admin password': 'סיסמת מנהל שונתה', 'change editor settings': 'change editor settings', 'Change password': 'Change password', 'Changelog': 'Changelog', 'check all': 'סמן הכל', 'Check for upgrades': 'check for upgrades', 'Check to delete': 'סמן כדי למחוק', 'Checking for upgrades...': 'מחפש עדכונים', 'Clean': 'נקה', 'Clear': 'Clear', 'Clear CACHE?': 'Clear CACHE?', 'Clear DISK': 'Clear DISK', 'Clear RAM': 'Clear RAM', 'Click on the link %(link)s to reset your password': 'Click on the link %(link)s to reset your password', 'Click row to expand traceback': 'Click row to expand traceback', 'Click row to view a ticket': 'Click row to view a ticket', 'click to check for upgrades': 'לחץ כדי לחפש עדכונים', 'Client IP': 'Client IP', 'code': 'קוד', 'Code listing': 'Code listing', 'collapse/expand all': 'collapse/expand all', 'Command': 'Command', 'Comment:': 'Comment:', 'Commit': 'Commit', 'Commit form': 'Commit form', 'Committed files': 'Committed files', 'Compile': 'קמפל', 'Compile (all or nothing)': 'Compile (all or nothing)', 'Compile (skip failed views)': 'Compile (skip failed views)', 'compiled application removed': 'אפליקציה מקומפלת הוסרה', 'Condition': 'Condition', 'continue': 'continue', 'Controllers': 'בקרים', 'controllers': 'בקרים', 'Count': 'Count', 'Create': 'צור', 'create file with filename:': 'צור קובץ בשם:', 'create new application:': 'צור אפליקציה חדשה:', 'Create new simple application': 'צור אפליקציה חדשה', 'Create/Upload': 'Create/Upload', 'created by': 'נוצר ע"י', 'Created by:': 'Created by:', 'Created On': 'Created On', 'Created on:': 'Created on:', 'crontab': 'משימות מתוזמנות', 'Current request': 'בקשה נוכחית', 'Current response': 'מענה נוכחי', 'Current session': 'סשן זה', 'currently running': 'currently running', 'currently saved or': 'נשמר כעת או', 'data uploaded': 'המידע הועלה', 'Database': 'Database', 'database': 'מסד נתונים', 'Database %s select': 'Database %s select', 'database %s select': 'מסד הנתונים %s נבחר', 'Database administration': 'Database administration', 'database administration': 'ניהול מסד נתונים', 'Database Administration (appadmin)': 'Database Administration (appadmin)', 'Date and Time': 'תאריך ושעה', 'db': 'מסד נתונים', 'Debug': 'Debug', 'defines tables': 'הגדר טבלאות', 'Delete': 'מחק', 'delete': 'מחק', 'delete all checked': 'סמן הכל למחיקה', 'delete plugin': 'מחק תוסף', 'Delete this file (you will be asked to confirm deletion)': 'Delete this file (you will be asked to confirm deletion)', 'Delete:': 'מחק:', 'deleted after first hit': 'deleted after first hit', 'Demo': 'Demo', 'Deploy': 'deploy', 'Deploy on Google App Engine': 'העלה ל Google App Engine', 'Deploy to OpenShift': 'Deploy to OpenShift', 'Deploy to pythonanywhere': 'Deploy to pythonanywhere', 'Deploy to PythonAnywhere': 'Deploy to PythonAnywhere', 'Deployment form': 'Deployment form', 'Deployment Interface': 'Deployment Interface', 'Description': 
'Description', 'Description:': 'Description:', 'design': 'עיצוב', 'Detailed traceback description': 'Detailed traceback description', 'details': 'details', 'direction: ltr': 'direction: rtl', 'directory not found': 'directory not found', 'Disable': 'Disable', 'Disabled': 'Disabled', 'disabled in demo mode': 'disabled in demo mode', 'disabled in GAE mode': 'disabled in GAE mode', 'disabled in multi user mode': 'disabled in multi user mode', 'DISK': 'DISK', 'Disk Cache Keys': 'Disk Cache Keys', 'Disk Cleared': 'Disk Cleared', 'Display line numbers': 'Display line numbers', 'DO NOT use the "Pack compiled" feature.': 'DO NOT use the "Pack compiled" feature.', 'docs': 'docs', 'Docs': 'Docs', 'done!': 'הסתיים!', 'Downgrade': 'Downgrade', 'Download .w2p': 'Download .w2p', 'Download as .exe': 'Download as .exe', 'download layouts': 'download layouts', 'Download layouts from repository': 'Download layouts from repository', 'download plugins': 'download plugins', 'Download plugins from repository': 'Download plugins from repository', 'E-mail': 'E-mail', 'EDIT': 'ערוך!', 'Edit': 'ערוך', 'edit all': 'edit all', 'Edit application': 'ערוך אפליקציה', 'edit controller': 'ערוך בקר', 'edit controller:': 'edit controller:', 'Edit current record': 'ערוך רשומה נוכחית', 'edit views:': 'ערוך קיבצי תצוגה:', 'Editing %s': 'Editing %s', 'Editing file "%s"': 'עורך את הקובץ "%s"', 'Editing Language file': 'עורך את קובץ השפה', 'Editing Plural Forms File': 'Editing Plural Forms File', 'Editor': 'Editor', 'Email Address': 'Email Address', 'Email sent': 'Email sent', 'Email verification': 'Email verification', 'Email verified': 'Email verified', 'Enable': 'Enable', 'Enable Close-Tag': 'Enable Close-Tag', 'Enable Code Folding': 'Enable Code Folding', 'Enterprise Web Framework': 'סביבת הפיתוח לרשת', 'Error': 'Error', 'Error logs for "%(app)s"': 'דו"ח שגיאות עבור אפליקציה "%(app)s"', 'Error snapshot': 'Error snapshot', 'Error ticket': 'Error ticket', 'Errors': 'שגיאות', 'Exception %(extype)s: %(exvalue)s': 'Exception %(extype)s: %(exvalue)s', 'Exception %s': 'Exception %s', 'Exception instance attributes': 'נתוני החריגה', 'Exit Fullscreen': 'Exit Fullscreen', 'Expand Abbreviation (html files only)': 'Expand Abbreviation (html files only)', 'export as csv file': 'יצא לקובץ csv', 'Exports:': 'Exports:', 'exposes': 'חושף את', 'exposes:': 'exposes:', 'extends': 'הרחבה של', 'failed to compile file because:': 'failed to compile file because:', 'failed to reload module because:': 'נכשל בטעינה חוזרת של מודול בגלל:', 'File': 'File', 'file "%(filename)s" created': 'הקובץ "%(filename)s" נוצר', 'file "%(filename)s" deleted': 'הקובץ "%(filename)s" נמחק', 'file "%(filename)s" uploaded': 'הקובץ "%(filename)s" הועלה', 'file "%s" of %s restored': 'הקובץ "%s" of %s שוחזר', 'file changed on disk': 'קובץ שונה על גבי הדיסק', 'file does not exist': 'קובץ לא נמצא', 'file not found': 'file not found', 'file saved on %(time)s': 'הקובץ נשמר בשעה %(time)s', 'file saved on %s': 'הקובץ נשמר ב%s', 'filename': 'filename', 'Filename': 'Filename', 'Files added': 'Files added', 'filter': 'filter', 'Find Next': 'Find Next', 'Find Previous': 'Find Previous', 'First name': 'First name', 'Form has errors': 'Form has errors', 'Frames': 'Frames', 'Function disabled': 'Function disabled', 'Functions with no doctests will result in [passed] tests.': 'פונקציות שלא הוגדר להן doctest ירשמו כבדיקות ש[עברו בהצלחה].', 'GAE Email': 'GAE Email', 'GAE Output': 'GAE Output', 'GAE Password': 'GAE Password', 'Generate': 'Generate', 'Git Pull': 'Git Pull', 'Git Push': 'Git 
Push', 'Globals##debug': 'Globals##debug', 'go!': 'go!', 'Google App Engine Deployment Interface': 'Google App Engine Deployment Interface', 'Google Application Id': 'Google Application Id', 'Goto': 'Goto', 'graph model': 'graph model', 'Graph Model': 'Graph Model', 'Group %(group_id)s created': 'Group %(group_id)s created', 'Group %(group_id)s deleted': 'Group %(group_id)s deleted', 'Group ID': 'Group ID', 'Group uniquely assigned to user %(id)s': 'Group uniquely assigned to user %(id)s', 'Help': 'עזרה', 'here': 'here', 'Hide/Show Translated strings': 'Hide/Show Translated strings', 'Highlight current line': 'Highlight current line', 'Hits': 'Hits', 'Home': 'Home', 'honored only if the expression evaluates to true': 'honored only if the expression evaluates to true', 'htmledit': 'עורך ויזואלי', 'If start the downgrade, be patient, it may take a while to rollback': 'If start the downgrade, be patient, it may take a while to rollback', 'If start the upgrade, be patient, it may take a while to download': 'If start the upgrade, be patient, it may take a while to download', 'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\n\t\tA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\n\t\tA green title indicates that all tests (if defined) passed. In this case test results are not shown.', 'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'אם בדו"ח לעיל מופיע מספר דו"ח שגיאה, זה מצביע על שגיאה בבקר, עוד לפני שניתן היה להריץ את הdoctest. לרוב מדובר בשגיאת הזחה, או שגיאה שאינה בקוד של הפונקציה.\r\nכותרת ירוקה מצביע על כך שכל הבדיקות (אם הוגדרו) עברו בהצלחה, במידה ותוצאות הבדיקה אינן מופיעות.', 'if your application uses a database other than sqlite you will then have to configure its DAL in pythonanywhere.': 'if your application uses a database other than sqlite you will then have to configure its DAL in pythonanywhere.', 'import': 'import', 'Import/Export': 'יבא\\יצא', 'In development, use the default Rocket webserver that is currently supported by this debugger.': 'In development, use the default Rocket webserver that is currently supported by this debugger.', 'includes': 'מכיל', 'Incorrect code. {0} more attempt(s) remaining.': 'Incorrect code. 
{0} more attempt(s) remaining.', 'Indent with tabs': 'Indent with tabs', 'insert new': 'הכנס נוסף', 'insert new %s': 'הכנס %s נוסף', 'inspect attributes': 'inspect attributes', 'Install': 'התקן', 'Installation of %(plugin)s for %(app)s': 'Installation of %(plugin)s for %(app)s', 'Installed applications': 'אפליקציות מותקנות', 'Insufficient privileges': 'Insufficient privileges', 'Interaction at %s line %s': 'Interaction at %s line %s', 'Interactive console': 'Interactive console', 'internal error': 'שגיאה מובנית', 'internal error: %s': 'internal error: %s', 'Internal State': 'מצב מובנה', 'Invalid action': 'הוראה לא קיימת', 'Invalid application name': 'Invalid application name', 'invalid circular reference': 'invalid circular reference', 'Invalid email': 'Invalid email', 'Invalid git repository specified.': 'Invalid git repository specified.', 'Invalid key': 'Invalid key', 'Invalid login': 'Invalid login', 'invalid password': 'סיסמא שגויה', 'Invalid password': 'Invalid password', 'invalid password.': 'invalid password.', 'Invalid Query': 'שאילתה לא תקינה', 'invalid request': 'בקשה לא תקינה', 'Invalid request': 'Invalid request', 'Invalid reset password': 'Invalid reset password', 'invalid table names (auth_* tables already defined)': 'invalid table names (auth_* tables already defined)', 'invalid ticket': 'דו"ח שגיאה לא קיים', 'Invalid user': 'Invalid user', 'Invalid username': 'Invalid username', 'Invitation to join %(site)s': 'Invitation to join %(site)s', 'Key': 'Key', 'Key verified': 'Key verified', 'Keyboard shortcuts': 'Keyboard shortcuts', 'kill process': 'kill process', 'language file "%(filename)s" created/updated': 'קובץ השפה "%(filename)s" נוצר\\עודכן', 'Language files (static strings) updated': 'קובץ השפה (מחרוזות סטאטיות) עודכן', 'languages': 'שפות', 'Languages': 'שפות', 'Last name': 'Last name', 'Last Revision': 'Last Revision', 'Last saved on:': 'לאחרונה נשמר בתאריך:', 'License for': 'רשיון עבור', 'License:': 'License:', 'Line Nr': 'Line Nr', 'Line number': 'Line number', 'lists by exception': 'lists by exception', 'lists by ticket': 'lists by ticket', 'Loading...': 'Loading...', 'loading...': 'טוען...', 'Local Apps': 'Local Apps', 'locals': 'locals', 'Locals##debug': 'Locals##debug', 'Log In': 'Log In', 'Logged in': 'Logged in', 'Logged out': 'Logged out', 'Login': 'התחבר', 'login': 'התחבר', 'Login disabled by administrator': 'Login disabled by administrator', 'Login successful': 'Login successful', 'Login to the Administrative Interface': 'התחבר לממשק המנהל', 'Login/Register': 'Login/Register', 'Logout': 'התנתק', 'lost password': 'lost password', 'Main Menu': 'Main Menu', 'Manage': 'Manage', 'Manage %(action)s': 'Manage %(action)s', 'Manage Access Control': 'Manage Access Control', 'Manage Admin Users/Students': 'Manage Admin Users/Students', 'Manage Cache': 'Manage Cache', 'Manage Students': 'Manage Students', 'Memberships': 'Memberships', 'merge': 'מזג', 'Models': 'מבני נתונים', 'models': 'מבני נתונים', 'Modified On': 'Modified On', 'Modules': 'מודולים', 'modules': 'מודולים', 'Multi User Mode': 'Multi User Mode', 'Name': 'Name', 'new application "%s" created': 'האפליקציה "%s" נוצרה', 'new application "%s" imported': 'new application "%s" imported', 'New Application Wizard': 'New Application Wizard', 'New application wizard': 'New application wizard', 'New password': 'New password', 'new plugin installed': 'פלאגין חדש הותקן', 'New plugin installed: %s': 'New plugin installed: %s', 'New Record': 'רשומה חדשה', 'new record inserted': 'הרשומה נוספה', 'New simple application': 
'New simple application', 'next': 'next', 'next %s rows': 'next %s rows', 'next 100 rows': '100 הרשומות הבאות', 'NO': 'לא', 'no changes': 'no changes', 'No databases in this application': 'אין מסדי נתונים לאפליקציה זו', 'No Interaction yet': 'No Interaction yet', 'no match': 'לא נמצאה התאמה', 'no package selected': 'no package selected', 'no permission to uninstall "%s"': 'no permission to uninstall "%s"', 'Node:': 'Node:', 'Not Authorized': 'Not Authorized', 'Not supported': 'Not supported', 'Note: If you receive an error with github status code of 128, ensure the system and account you are deploying from has a cooresponding ssh key configured in the openshift account.': 'Note: If you receive an error with github status code of 128, ensure the system and account you are deploying from has a cooresponding ssh key configured in the openshift account.', 'Object or table name': 'Object or table name', 'Old password': 'Old password', "On production, you'll have to configure your webserver to use one process and multiple threads to use this debugger.": "On production, you'll have to configure your webserver to use one process and multiple threads to use this debugger.", 'Open new app in new window': 'Open new app in new window', 'OpenShift Deployment Interface': 'OpenShift Deployment Interface', 'OpenShift Output': 'OpenShift Output', 'or alternatively': 'or alternatively', 'Or Get from URL:': 'Or Get from URL:', 'or import from csv file': 'או יבא מקובץ csv', 'or provide app url:': 'או ספק כתובת url של אפליקציה', 'Origin': 'Origin', 'Original/Translation': 'מקור\\תרגום', 'Overview': 'Overview', 'Overwrite installed app': 'התקן על גבי אפלקציה מותקנת', 'Pack all': 'ארוז הכל', 'Pack compiled': 'ארוז מקומפל', 'Pack custom': 'Pack custom', 'pack plugin': 'ארוז תוסף', 'PAM authenticated user, cannot change password here': 'שינוי סיסמא באמצעות PAM אינו יכול להתבצע כאן', 'Password': 'Password', 'password changed': 'סיסמא שונתה', 'Password changed': 'Password changed', "Password fields don't match": "Password fields don't match", 'Password reset': 'Password reset', 'Password retrieve': 'Password retrieve', 'Past revisions': 'Past revisions', 'Path to appcfg.py': 'Path to appcfg.py', 'Path to local openshift repo root.': 'Path to local openshift repo root.', 'Peeking at file': 'מעיין בקובץ', 'Permission': 'Permission', 'Permissions': 'Permissions', 'Please': 'Please', 'please input your password again': 'please input your password again', 'Please wait, giving pythonanywhere a moment...': 'Please wait, giving pythonanywhere a moment...', 'plugin "%(plugin)s" deleted': 'תוסף "%(plugin)s" נמחק', 'Plugin "%s" in application': 'פלאגין "%s" של אפליקציה', 'plugin not specified': 'plugin not specified', 'Plugin page': 'Plugin page', 'plugins': 'plugins', 'Plugins': 'תוספים', 'Plural Form #%s': 'Plural Form #%s', 'Plural-Forms:': 'Plural-Forms:', 'Powered by': 'מופעל ע"י', 'Preferences saved correctly': 'Preferences saved correctly', 'Preferences saved on session only': 'Preferences saved on session only', 'previous %s rows': 'previous %s rows', 'previous 100 rows': '100 הרשומות הקודמות', 'Private files': 'Private files', 'private files': 'private files', 'Profile updated': 'Profile updated', 'Project Progress': 'Project Progress', 'Pull': 'Pull', 'Pull failed, certain files could not be checked out. Check logs for details.': 'Pull failed, certain files could not be checked out. Check logs for details.', 'Pull is not possible because you have unmerged files. 
Fix them up in the work tree, and then try again.': 'Pull is not possible because you have unmerged files. Fix them up in the work tree, and then try again.', 'Push': 'Push', 'Push failed, there are unmerged entries in the cache. Resolve merge issues manually and try again.': 'Push failed, there are unmerged entries in the cache. Resolve merge issues manually and try again.', 'pygraphviz library not found': 'pygraphviz library not found', 'PythonAnywhere Apps': 'PythonAnywhere Apps', 'PythonAnywhere Password': 'PythonAnywhere Password', 'Query:': 'שאילתה:', 'RAM': 'RAM', 'RAM Cache Keys': 'RAM Cache Keys', 'Ram Cleared': 'Ram Cleared', 'Rapid Search': 'Rapid Search', 'Record': 'Record', 'record': 'רשומה', 'Record %(id)s created': 'Record %(id)s created', 'Record %(id)s deleted': 'Record %(id)s deleted', 'Record %(id)s read': 'Record %(id)s read', 'Record %(id)s updated': 'Record %(id)s updated', 'Record Created': 'Record Created', 'Record Deleted': 'Record Deleted', 'record does not exist': 'הרשומה אינה קיימת', 'record id': 'מזהה רשומה', 'Record id': 'Record id', 'Record ID': 'Record ID', 'Record Updated': 'Record Updated', 'refresh': 'refresh', 'register': 'register', 'Registration identifier': 'Registration identifier', 'Registration is pending approval': 'Registration is pending approval', 'Registration key': 'Registration key', 'Registration needs verification': 'Registration needs verification', 'Registration successful': 'Registration successful', 'Reload routes': 'Reload routes', 'Remember me (for 30 days)': 'Remember me (for 30 days)', 'Remove compiled': 'הסר מקומפל', 'Removed Breakpoint on %s at line %s': 'Removed Breakpoint on %s at line %s', 'Replace': 'Replace', 'Replace All': 'Replace All', 'Repository (%s)': 'Repository (%s)', 'request': 'request', 'Request reset password': 'Request reset password', 'requires distutils, but not installed': 'requires distutils, but not installed', 'requires python-git, but not installed': 'requires python-git, but not installed', 'Reset Password key': 'Reset Password key', 'Resolve Conflict file': 'הסר קובץ היוצר קונפליקט', 'response': 'response', 'restart': 'restart', 'restore': 'שחזר', 'return': 'return', 'Revert': 'Revert', 'revert': 'חזור לגירסא קודמת', 'reverted to revision %s': 'reverted to revision %s', 'Revision %s': 'Revision %s', 'Revision:': 'Revision:', 'Role': 'Role', 'Roles': 'Roles', 'Rows in Table': 'Rows in Table', 'Rows in table': 'רשומות בטבלה', 'Rows selected': 'רשומות נבחרו', 'rules are not defined': 'rules are not defined', 'Run tests': 'Run tests', 'Run tests in this file': 'Run tests in this file', "Run tests in this file (to run all files, you may also use the button labelled 'test')": "Run tests in this file (to run all files, you may also use the button labelled 'test')", 'Running on %s': 'Running on %s', 'Save': 'Save', 'Save file:': 'Save file:', 'Save file: %s': 'Save file: %s', 'Save model as...': 'Save model as...', 'Save via Ajax': 'Save via Ajax', 'Saved file hash:': 'גיבוב הקובץ השמור:', 'Screenshot %s': 'Screenshot %s', 'Search': 'Search', 'Select Files to Package': 'Select Files to Package', 'selected': 'נבחרו', 'session': 'session', 'session expired': 'תם הסשן', 'Session saved correctly': 'Session saved correctly', 'Session saved on session only': 'Session saved on session only', 'Set Breakpoint on %s at line %s: %s': 'Set Breakpoint on %s at line %s: %s', 'shell': 'שורת פקודה', 'Showing %s to %s of %s %s found': 'Showing %s to %s of %s %s found', 'Sign Up': 'Sign Up', 'Singular Form': 'Singular Form', 
'Site': 'אתר', 'Size of cache:': 'Size of cache:', 'skip to generate': 'skip to generate', 'some files could not be removed': 'לא ניתן היה להסיר חלק מהקבצים', 'Something went wrong please wait a few minutes before retrying': 'Something went wrong please wait a few minutes before retrying', 'Sorry, could not find mercurial installed': 'Sorry, could not find mercurial installed', 'source : db': 'source : db', 'source : filesystem': 'source : filesystem', 'Start a new app': 'Start a new app', 'Start searching': 'Start searching', 'Start wizard': 'start wizard', 'state': 'מצב', 'Static': 'Static', 'static': 'קבצים סטאטיים', 'Static files': 'קבצים סטאטיים', 'Statistics': 'Statistics', 'Step': 'Step', 'step': 'step', 'stop': 'stop', 'submit': 'שלח', 'Submit': 'Submit', 'successful': 'successful', 'Sure you want to delete this object?': 'האם אתה בטוח שברצונך למחוק אובייקט זה?', 'switch to : db': 'switch to : db', 'switch to : filesystem': 'switch to : filesystem', 'Tab width (# characters)': 'Tab width (# characters)', 'table': 'טבלה', 'Table': 'Table', 'Temporary': 'Temporary', 'test': 'בדיקות', 'Testing application': 'בודק את האפליקציה', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"שאליתה" היא תנאי כגון "db1.table1.filed1=\'value\'" ביטוי כמו db.table1.field1=db.table2.field1 יחולל join', 'The app exists, was created by wizard, continue to overwrite!': 'The app exists, was created by wizard, continue to overwrite!', 'The app exists, was NOT created by wizard, continue to overwrite!': 'The app exists, was NOT created by wizard, continue to overwrite!', 'the application logic, each URL path is mapped in one exposed function in the controller': 'הלוגיקה של האפליקציה, כל url ממופה לפונקציה חשופה בבקר', 'The application logic, each URL path is mapped in one exposed function in the controller': 'The application logic, each URL path is mapped in one exposed function in the controller', 'the data representation, define database tables and sets': 'ייצוג המידע, בו מוגדרים טבלאות ומבנים', 'The data representation, define database tables and sets': 'The data representation, define database tables and sets', 'The presentations layer, views are also known as templates': 'The presentations layer, views are also known as templates', 'the presentations layer, views are also known as templates': 'שכבת התצוגה, המכונה גם template', 'Theme': 'Theme', 'There are no controllers': 'אין בקרים', 'There are no models': 'אין מבני נתונים', 'There are no modules': 'אין מודולים', 'There are no plugins': 'There are no plugins', 'There are no private files': 'There are no private files', 'There are no static files': 'אין קבצים סטאטיים', 'There are no translators': 'There are no translators', 'There are no translators, only default language is supported': 'אין תרגומים. רק שפת ברירת המחדל נתמכת', 'There are no views': 'אין קבצי תצוגה', 'These files are not served, they are only available from within your app': 'These files are not served, they are only available from within your app', 'These files are served without processing, your images go here': 'These files are served without processing, your images go here', 'these files are served without processing, your images go here': 'אלו הם קבצים הנשלחים מהשרת ללא עיבוד. 
הכנס את התמונות כאן', 'This code was emailed to you and is required for login.': 'This code was emailed to you and is required for login.', "This debugger may not work properly if you don't have a threaded webserver or you're using multiple daemon processes.": "This debugger may not work properly if you don't have a threaded webserver or you're using multiple daemon processes.", 'This email already has an account': 'This email already has an account', 'This is an experimental feature and it needs more testing. If you decide to downgrade you do it at your own risk': 'This is an experimental feature and it needs more testing. If you decide to downgrade you do it at your own risk', 'This is an experimental feature and it needs more testing. If you decide to upgrade you do it at your own risk': 'This is an experimental feature and it needs more testing. If you decide to upgrade you do it at your own risk', 'This is the %(filename)s template': 'זוהי תבנית הקובץ %(filename)s ', "This page can commit your changes to an openshift app repo and push them to your cloud instance. This assumes that you've already created the application instance using the web2py skeleton and have that repo somewhere on a filesystem that this web2py instance can access. This functionality requires GitPython installed and on the python path of the runtime that web2py is operating in.": "This page can commit your changes to an openshift app repo and push them to your cloud instance. This assumes that you've already created the application instance using the web2py skeleton and have that repo somewhere on a filesystem that this web2py instance can access. This functionality requires GitPython installed and on the python path of the runtime that web2py is operating in.", 'This page can upload your application to the Google App Engine computing cloud. Mind that you must first create indexes locally and this is done by installing the Google appserver and running the app locally with it once, or there will be errors when selecting records. Attention: deployment may take long time, depending on the network speed. Attention: it will overwrite your app.yaml. DO NOT SUBMIT TWICE.': 'This page can upload your application to the Google App Engine computing cloud. Mind that you must first create indexes locally and this is done by installing the Google appserver and running the app locally with it once, or there will be errors when selecting records. Attention: deployment may take long time, depending on the network speed. Attention: it will overwrite your app.yaml. 
DO NOT SUBMIT TWICE.', 'this page to see if a breakpoint was hit and debug interaction is required.': 'this page to see if a breakpoint was hit and debug interaction is required.', 'This will pull changes from the remote repo for application "%s"?': 'This will pull changes from the remote repo for application "%s"?', 'This will push changes to the remote repo for application "%s".': 'This will push changes to the remote repo for application "%s".', 'Ticket': 'דו"ח שגיאה', 'Ticket ID': 'Ticket ID', 'Ticket Missing': 'Ticket Missing', 'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)', 'Timestamp': 'Timestamp', 'TM': 'סימן רשום', 'to previous version.': 'אין גירסא קודמת', 'To create a plugin, name a file/folder plugin_[name]': 'כדי ליצור תוסף, קרא לקובץ או סיפריה בשם לפי התבנית plugin_[name]', 'To emulate a breakpoint programatically, write:': 'To emulate a breakpoint programatically, write:', 'to use the debugger!': 'to use the debugger!', 'toggle breakpoint': 'toggle breakpoint', 'Toggle comment': 'Toggle comment', 'Toggle Fullscreen': 'Toggle Fullscreen', 'Traceback': 'Traceback', 'translation strings for the application': 'מחרוזות תרגום עבור האפליקציה', 'Translation strings for the application': 'Translation strings for the application', 'try': 'נסה', 'try something like': 'נסה משהו כמו', 'Try the mobile interface': 'Try the mobile interface', 'try view': 'try view', 'Two-step Login Authentication Code': 'Two-step Login Authentication Code', 'Type PDB debugger command in here and hit Return (Enter) to execute it.': 'Type PDB debugger command in here and hit Return (Enter) to execute it.', 'Type some Python code in here and hit Return (Enter) to execute it.': 'Type some Python code in here and hit Return (Enter) to execute it.', 'Unable to check for upgrades': 'לא ניתן היה לבדוק אם יש שדרוגים', 'unable to create application "%s"': 'נכשל ביצירת האפליקציה "%s"', 'unable to delete file "%(filename)s"': 'נכשל במחיקת הקובץ "%(filename)s"', 'unable to delete file plugin "%(plugin)s"': 'נכשל במחיקת התוסף "%(plugin)s"', 'Unable to determine the line number!': 'Unable to determine the line number!', 'Unable to download app because:': 'לא ניתן היה להוריד את האפליקציה כי:', 'Unable to download because': 'לא הצלחתי להוריד כי', 'unable to download layout': 'unable to download layout', 'unable to download plugin: %s': 'unable to download plugin: %s', 'Unable to download the list of plugins': 'Unable to download the list of plugins', 'unable to install plugin "%s"': 'unable to install plugin "%s"', 'unable to parse csv file': 'לא הצלחתי לנתח את הקלט של קובץ csv', 'Unable to send email': 'Unable to send email', 'unable to uninstall "%s"': 'לא ניתן להסיר את "%s"', 'unable to upgrade because "%s"': 'לא ניתן היה לשדרג כי "%s"', 'uncheck all': 'הסר סימון מהכל', 'Uninstall': 'הסר התקנה', 'Unsupported webserver working mode: %s': 'Unsupported webserver working mode: %s', 'update': 'עדכן', 'update all languages': 'עדכן את כלל קיבצי השפה', 'Update:': 'עדכן:', 'Upgrade': 'Upgrade', 'upgrade now': 'upgrade now', 'upgrade now to %s': 'upgrade now to %s', 'upgrade web2py now': 'שדרג את web2py עכשיו', 'upload': 'upload', 'Upload': 'Upload', 'Upload & install packed application': 'העלה והתקן אפליקציה ארוזה', 'Upload a package:': 'Upload a package:', 'Upload and install packed application': 'Upload and install packed application', 'upload application:': 'העלה אפליקציה:', 'upload file:': 'העלה קובץ:', 'upload plugin file:': 'העלה קובץ תוסף:', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) 
for NOT to build more complex queries.': 'השתמש ב (...)&(...) עבור תנאי AND, (...)|(...) עבור תנאי OR ו~(...) עבור תנאי NOT ליצירת שאילתות מורכבות', 'Use an url:': 'Use an url:', 'User': 'User', 'User %(id)s is impersonating %(other_id)s': 'User %(id)s is impersonating %(other_id)s', 'User %(id)s Logged-in': 'User %(id)s Logged-in', 'User %(id)s Logged-out': 'User %(id)s Logged-out', 'User %(id)s Password changed': 'User %(id)s Password changed', 'User %(id)s Password reset': 'User %(id)s Password reset', 'User %(id)s Password retrieved': 'User %(id)s Password retrieved', 'User %(id)s Profile updated': 'User %(id)s Profile updated', 'User %(id)s Registered': 'User %(id)s Registered', 'User %(id)s Username retrieved': 'User %(id)s Username retrieved', 'User %(id)s Verification email sent': 'User %(id)s Verification email sent', 'User %(id)s verified registration key': 'User %(id)s verified registration key', 'User ID': 'User ID', 'Username': 'Username', 'Username already taken': 'Username already taken', 'Username retrieve': 'Username retrieve', 'Users': 'Users', 'Using the shell may lock the database to other users of this app.': 'Using the shell may lock the database to other users of this app.', 'variables': 'משתנים', 'Verify Password': 'Verify Password', 'Version': 'גירסא', 'Versioning': 'Versioning', 'versioning': 'מנגנון גירסאות', 'view': 'הצג', 'Views': 'מראה', 'views': 'מראה', 'Warning!': 'Warning!', 'WARNING:': 'WARNING:', 'WARNING: The following views could not be compiled:': 'WARNING: The following views could not be compiled:', 'Web Framework': 'Web Framework', 'web2py Admin Password': 'web2py Admin Password', 'web2py apps to deploy': 'web2py apps to deploy', 'web2py Debugger': 'web2py Debugger', 'web2py downgrade': 'web2py downgrade', 'web2py is up to date': 'web2py מותקנת בגירסתה האחרונה', 'web2py online debugger': 'web2py online debugger', 'web2py Recent Tweets': 'ציוצים אחרונים של web2py', 'web2py upgrade': 'web2py upgrade', 'web2py upgraded; please restart it': 'web2py שודרגה; נא אתחל אותה', 'Welcome %(username)s! Click on the link %(link)s to verify your email': 'Welcome %(username)s! Click on the link %(link)s to verify your email', 'Working...': 'Working...', 'WSGI reference name': 'WSGI reference name', 'YES': 'כן', 'Yes': 'Yes', 'You can also set and remove breakpoint in the edit window, using the Toggle Breakpoint button': 'You can also set and remove breakpoint in the edit window, using the Toggle Breakpoint button', 'You can inspect variables using the console below': 'You can inspect variables using the console below', 'You have been invited to join %(site)s, click %(link)s to complete the process': 'You have been invited to join %(site)s, click %(link)s to complete the process', 'You have one more login attempt before you are locked out': 'You have one more login attempt before you are locked out', 'You need to set up and reach a': 'You need to set up and reach a', 'You only need these if you have already registered': 'You only need these if you have already registered', 'Your application will be blocked until you click an action button (next, step, continue, etc.)': 'Your application will be blocked until you click an action button (next, step, continue, etc.)', 'Your password is: %(password)s': 'Your password is: %(password)s', 'Your temporary login code is {0}': 'Your temporary login code is {0}', 'Your username is: %(username)s': 'Your username is: %(username)s', 'Your username was emailed to you': 'Your username was emailed to you', }
56.004132
823
0.710618
{ '!langcode!': 'he-il', '!langname!': 'עברית', '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"עדכן" הוא ביטוי אופציונאלי, כגון "field1=newvalue". אינך יוכל להשתמש בjoin, בעת שימוש ב"עדכן" או "מחק".', '"User Exception" debug mode. ': '"User Exception" debug mode. ', '%s': '%s', '%s %%{row} deleted': '%s רשומות נמחקו', '%s %%{row} updated': '%s רשומות עודכנו', '%s selected': '%s selected', '%s students registered': '%s students registered', '%Y-%m-%d': '%Y-%m-%d', '%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', '(requires internet access)': '(requires internet access)', '(requires internet access, experimental)': '(requires internet access, experimental)', '(something like "it-it")': '(למשל "it-it")', '(version %s)': '(version %s)', '?': '?', '@markmin\x01(**%.0d MB**)': '(**%.0d MB**)', '@markmin\x01(file **gluon/contrib/plural_rules/%s.py** is not found)': '(file **gluon/contrib/plural_rules/%s.py** is not found)', '@markmin\x01**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}', '@markmin\x01**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}', '@markmin\x01**not available** (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)': '**not available** (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)', '@markmin\x01``**not available**``:red (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)': '``**not available**``:red (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)', '@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page', '@markmin\x01Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.', '@markmin\x01DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.', '@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})', "@markmin\x01Mercurial Version Control System Interface[[NEWLINE]]for application '%s'": "Mercurial Version Control System Interface[[NEWLINE]]for application '%s'", '@markmin\x01Number of entries: **%s**': 'Number of entries: **%s**', '@markmin\x01Please [[refresh %s]] this page to see if a breakpoint was hit and debug interaction is required.': 'Please [[refresh %s]] this page to see if a breakpoint was hit and debug interaction is required.', '@markmin\x01RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.', '@markmin\x01Searching: **%s** %%{file}': 'Searching: **%s** files', '@markmin\x01You need to set up and reach a [[breakpoint %s]] to use the debugger!': 'You need to set up and reach a [[breakpoint %s]] to use the debugger!', 'A new password was emailed to you': 'A new 
password was emailed to you', 'A new version of web2py is available: %s': 'גירסא חדשה של web2py זמינה: %s', 'Abort': 'Abort', 'About': 'אודות', 'About application': 'אודות אפליקציה', 'Accept Terms': 'Accept Terms', 'Add breakpoint': 'Add breakpoint', 'additional code for your application': 'קוד נוסף עבור האפליקציה שלך', 'Additional code for your application': 'Additional code for your application', 'Admin design page': 'Admin design page', 'admin disabled because no admin password': 'ממשק המנהל מנוטרל כי לא הוגדרה סיסמת מנהל', 'admin disabled because not supported on google app engine': 'ממשק המנהל נוטרל, כי אין תמיכה בGoogle app engine', 'admin disabled because too many invalid login attempts': 'admin disabled because too many invalid login attempts', 'admin disabled because unable to access password file': 'ממשק מנהל נוטרל, כי לא ניתן לגשת לקובץ הסיסמאות', 'Admin is disabled because insecure channel': 'ממשק האדמין נוטרל בשל גישה לא מאובטחת', 'Admin language': 'Admin language', 'Admin versioning page': 'Admin versioning page', 'administrative interface': 'administrative interface', 'Administrator Password:': 'סיסמת מנהל', 'and rename it (required):': 'ושנה את שמו (חובה):', 'and rename it:': 'ושנה את שמו:', 'App does not exist or you are not authorized': 'App does not exist or you are not authorized', 'appadmin': 'מנהל מסד הנתונים', 'appadmin is disabled because insecure channel': 'מנהל מסד הנתונים נוטרל בשל ערוץ לא מאובטח', 'Application': 'Application', 'application "%s" uninstalled': 'אפליקציה "%s" הוסרה', 'Application cannot be generated in demo mode': 'Application cannot be generated in demo mode', 'application compiled': 'אפליקציה קומפלה', 'Application exists already': 'Application exists already', 'application is compiled and cannot be designed': 'לא ניתן לערוך אפליקציה מקומפלת', 'Application name:': 'Application name:', 'Application updated via git pull': 'Application updated via git pull', 'Apply changes': 'Apply changes', 'are not used': 'are not used', 'are not used yet': 'are not used yet', 'Are you sure you want to delete file "%s"?': 'האם אתה בטוח שברצונך למחוק את הקובץ "%s"?', 'Are you sure you want to delete plugin "%s"?': 'האם אתה בטוח שברצונך למחוק את התוסף "%s"?', 'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?', 'Are you sure you want to uninstall application "%s"?': 'האם אתה בטוח שברצונך להסיר את האפליקציה "%s"?', 'Are you sure you want to upgrade web2py now?': 'האם אתה בטוח שאתה רוצה לשדרג את web2py עכשיו?', 'Are you sure?': 'Are you sure?', 'arguments': 'פרמטרים', 'at char %s': 'at char %s', 'at line %s': 'at line %s', 'ATTENTION:': 'ATTENTION:', 'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'לתשומת ליבך: ניתן להתחבר רק בערוץ מאובטח (HTTPS) או מlocalhost', 'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'לתשומת ליבך: אין לערוך מספר בדיקות במקביל, שכן הן עשויות להפריע זו לזו', 'ATTENTION: you cannot edit the running application!': 'לתשומת ליבך: לא ניתן לערוך אפליקציה בזמן הרצתה', 'Authentication code': 'Authentication code', 'Autocomplete Python Code': 'Autocomplete Python Code', 'Available databases and tables': 'מסדי נתונים וטבלאות זמינים', 'Available Databases and Tables': 'Available Databases and Tables', 'back': 'אחורה', 'Back to the plugins list': 'Back to the plugins list', 'Back to wizard': 'Back to wizard', 'Basics': 'Basics', 'Begin': 'Begin', 'breakpoint': 'breakpoint', 'Breakpoints': 'Breakpoints', 'breakpoints': 'breakpoints', 'Bulk 
Register': 'Bulk Register', 'Bulk Student Registration': 'Bulk Student Registration', 'Cache': 'Cache', 'cache': 'מטמון', 'Cache Cleared': 'Cache Cleared', 'Cache Keys': 'Cache Keys', 'cache, errors and sessions cleaned': 'מטמון, שגיאות וסשן נוקו', 'can be a git repo': 'can be a git repo', 'Cancel': 'Cancel', 'Cannot be empty': 'אינו יכול להישאר ריק', 'Cannot compile: there are errors in your app:': 'לא ניתן לקמפל: ישנן שגיאות באפליקציה שלך:', 'cannot create file': 'לא מצליח ליצור קובץ', 'cannot upload file "%(filename)s"': 'לא הצלחתי להעלות את הקובץ "%(filename)s"', 'Change Admin Password': 'Change Admin Password', 'Change admin password': 'סיסמת מנהל שונתה', 'change editor settings': 'change editor settings', 'Change password': 'Change password', 'Changelog': 'Changelog', 'check all': 'סמן הכל', 'Check for upgrades': 'check for upgrades', 'Check to delete': 'סמן כדי למחוק', 'Checking for upgrades...': 'מחפש עדכונים', 'Clean': 'נקה', 'Clear': 'Clear', 'Clear CACHE?': 'Clear CACHE?', 'Clear DISK': 'Clear DISK', 'Clear RAM': 'Clear RAM', 'Click on the link %(link)s to reset your password': 'Click on the link %(link)s to reset your password', 'Click row to expand traceback': 'Click row to expand traceback', 'Click row to view a ticket': 'Click row to view a ticket', 'click to check for upgrades': 'לחץ כדי לחפש עדכונים', 'Client IP': 'Client IP', 'code': 'קוד', 'Code listing': 'Code listing', 'collapse/expand all': 'collapse/expand all', 'Command': 'Command', 'Comment:': 'Comment:', 'Commit': 'Commit', 'Commit form': 'Commit form', 'Committed files': 'Committed files', 'Compile': 'קמפל', 'Compile (all or nothing)': 'Compile (all or nothing)', 'Compile (skip failed views)': 'Compile (skip failed views)', 'compiled application removed': 'אפליקציה מקומפלת הוסרה', 'Condition': 'Condition', 'continue': 'continue', 'Controllers': 'בקרים', 'controllers': 'בקרים', 'Count': 'Count', 'Create': 'צור', 'create file with filename:': 'צור קובץ בשם:', 'create new application:': 'צור אפליקציה חדשה:', 'Create new simple application': 'צור אפליקציה חדשה', 'Create/Upload': 'Create/Upload', 'created by': 'נוצר ע"י', 'Created by:': 'Created by:', 'Created On': 'Created On', 'Created on:': 'Created on:', 'crontab': 'משימות מתוזמנות', 'Current request': 'בקשה נוכחית', 'Current response': 'מענה נוכחי', 'Current session': 'סשן זה', 'currently running': 'currently running', 'currently saved or': 'נשמר כעת או', 'data uploaded': 'המידע הועלה', 'Database': 'Database', 'database': 'מסד נתונים', 'Database %s select': 'Database %s select', 'database %s select': 'מסד הנתונים %s נבחר', 'Database administration': 'Database administration', 'database administration': 'ניהול מסד נתונים', 'Database Administration (appadmin)': 'Database Administration (appadmin)', 'Date and Time': 'תאריך ושעה', 'db': 'מסד נתונים', 'Debug': 'Debug', 'defines tables': 'הגדר טבלאות', 'Delete': 'מחק', 'delete': 'מחק', 'delete all checked': 'סמן הכל למחיקה', 'delete plugin': 'מחק תוסף', 'Delete this file (you will be asked to confirm deletion)': 'Delete this file (you will be asked to confirm deletion)', 'Delete:': 'מחק:', 'deleted after first hit': 'deleted after first hit', 'Demo': 'Demo', 'Deploy': 'deploy', 'Deploy on Google App Engine': 'העלה ל Google App Engine', 'Deploy to OpenShift': 'Deploy to OpenShift', 'Deploy to pythonanywhere': 'Deploy to pythonanywhere', 'Deploy to PythonAnywhere': 'Deploy to PythonAnywhere', 'Deployment form': 'Deployment form', 'Deployment Interface': 'Deployment Interface', 'Description': 'Description', 
'Description:': 'Description:', 'design': 'עיצוב', 'Detailed traceback description': 'Detailed traceback description', 'details': 'details', 'direction: ltr': 'direction: rtl', 'directory not found': 'directory not found', 'Disable': 'Disable', 'Disabled': 'Disabled', 'disabled in demo mode': 'disabled in demo mode', 'disabled in GAE mode': 'disabled in GAE mode', 'disabled in multi user mode': 'disabled in multi user mode', 'DISK': 'DISK', 'Disk Cache Keys': 'Disk Cache Keys', 'Disk Cleared': 'Disk Cleared', 'Display line numbers': 'Display line numbers', 'DO NOT use the "Pack compiled" feature.': 'DO NOT use the "Pack compiled" feature.', 'docs': 'docs', 'Docs': 'Docs', 'done!': 'הסתיים!', 'Downgrade': 'Downgrade', 'Download .w2p': 'Download .w2p', 'Download as .exe': 'Download as .exe', 'download layouts': 'download layouts', 'Download layouts from repository': 'Download layouts from repository', 'download plugins': 'download plugins', 'Download plugins from repository': 'Download plugins from repository', 'E-mail': 'E-mail', 'EDIT': 'ערוך!', 'Edit': 'ערוך', 'edit all': 'edit all', 'Edit application': 'ערוך אפליקציה', 'edit controller': 'ערוך בקר', 'edit controller:': 'edit controller:', 'Edit current record': 'ערוך רשומה נוכחית', 'edit views:': 'ערוך קיבצי תצוגה:', 'Editing %s': 'Editing %s', 'Editing file "%s"': 'עורך את הקובץ "%s"', 'Editing Language file': 'עורך את קובץ השפה', 'Editing Plural Forms File': 'Editing Plural Forms File', 'Editor': 'Editor', 'Email Address': 'Email Address', 'Email sent': 'Email sent', 'Email verification': 'Email verification', 'Email verified': 'Email verified', 'Enable': 'Enable', 'Enable Close-Tag': 'Enable Close-Tag', 'Enable Code Folding': 'Enable Code Folding', 'Enterprise Web Framework': 'סביבת הפיתוח לרשת', 'Error': 'Error', 'Error logs for "%(app)s"': 'דו"ח שגיאות עבור אפליקציה "%(app)s"', 'Error snapshot': 'Error snapshot', 'Error ticket': 'Error ticket', 'Errors': 'שגיאות', 'Exception %(extype)s: %(exvalue)s': 'Exception %(extype)s: %(exvalue)s', 'Exception %s': 'Exception %s', 'Exception instance attributes': 'נתוני החריגה', 'Exit Fullscreen': 'Exit Fullscreen', 'Expand Abbreviation (html files only)': 'Expand Abbreviation (html files only)', 'export as csv file': 'יצא לקובץ csv', 'Exports:': 'Exports:', 'exposes': 'חושף את', 'exposes:': 'exposes:', 'extends': 'הרחבה של', 'failed to compile file because:': 'failed to compile file because:', 'failed to reload module because:': 'נכשל בטעינה חוזרת של מודול בגלל:', 'File': 'File', 'file "%(filename)s" created': 'הקובץ "%(filename)s" נוצר', 'file "%(filename)s" deleted': 'הקובץ "%(filename)s" נמחק', 'file "%(filename)s" uploaded': 'הקובץ "%(filename)s" הועלה', 'file "%s" of %s restored': 'הקובץ "%s" of %s שוחזר', 'file changed on disk': 'קובץ שונה על גבי הדיסק', 'file does not exist': 'קובץ לא נמצא', 'file not found': 'file not found', 'file saved on %(time)s': 'הקובץ נשמר בשעה %(time)s', 'file saved on %s': 'הקובץ נשמר ב%s', 'filename': 'filename', 'Filename': 'Filename', 'Files added': 'Files added', 'filter': 'filter', 'Find Next': 'Find Next', 'Find Previous': 'Find Previous', 'First name': 'First name', 'Form has errors': 'Form has errors', 'Frames': 'Frames', 'Function disabled': 'Function disabled', 'Functions with no doctests will result in [passed] tests.': 'פונקציות שלא הוגדר להן doctest ירשמו כבדיקות ש[עברו בהצלחה].', 'GAE Email': 'GAE Email', 'GAE Output': 'GAE Output', 'GAE Password': 'GAE Password', 'Generate': 'Generate', 'Git Pull': 'Git Pull', 'Git Push': 'Git Push', 
'Globals##debug': 'Globals##debug', 'go!': 'go!', 'Google App Engine Deployment Interface': 'Google App Engine Deployment Interface', 'Google Application Id': 'Google Application Id', 'Goto': 'Goto', 'graph model': 'graph model', 'Graph Model': 'Graph Model', 'Group %(group_id)s created': 'Group %(group_id)s created', 'Group %(group_id)s deleted': 'Group %(group_id)s deleted', 'Group ID': 'Group ID', 'Group uniquely assigned to user %(id)s': 'Group uniquely assigned to user %(id)s', 'Help': 'עזרה', 'here': 'here', 'Hide/Show Translated strings': 'Hide/Show Translated strings', 'Highlight current line': 'Highlight current line', 'Hits': 'Hits', 'Home': 'Home', 'honored only if the expression evaluates to true': 'honored only if the expression evaluates to true', 'htmledit': 'עורך ויזואלי', 'If start the downgrade, be patient, it may take a while to rollback': 'If start the downgrade, be patient, it may take a while to rollback', 'If start the upgrade, be patient, it may take a while to download': 'If start the upgrade, be patient, it may take a while to download', 'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\n\t\tA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\n\t\tA green title indicates that all tests (if defined) passed. In this case test results are not shown.', 'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'אם בדו"ח לעיל מופיע מספר דו"ח שגיאה, זה מצביע על שגיאה בבקר, עוד לפני שניתן היה להריץ את הdoctest. לרוב מדובר בשגיאת הזחה, או שגיאה שאינה בקוד של הפונקציה.\r\nכותרת ירוקה מצביע על כך שכל הבדיקות (אם הוגדרו) עברו בהצלחה, במידה ותוצאות הבדיקה אינן מופיעות.', 'if your application uses a database other than sqlite you will then have to configure its DAL in pythonanywhere.': 'if your application uses a database other than sqlite you will then have to configure its DAL in pythonanywhere.', 'import': 'import', 'Import/Export': 'יבא\\יצא', 'In development, use the default Rocket webserver that is currently supported by this debugger.': 'In development, use the default Rocket webserver that is currently supported by this debugger.', 'includes': 'מכיל', 'Incorrect code. {0} more attempt(s) remaining.': 'Incorrect code. 
{0} more attempt(s) remaining.', 'Indent with tabs': 'Indent with tabs', 'insert new': 'הכנס נוסף', 'insert new %s': 'הכנס %s נוסף', 'inspect attributes': 'inspect attributes', 'Install': 'התקן', 'Installation of %(plugin)s for %(app)s': 'Installation of %(plugin)s for %(app)s', 'Installed applications': 'אפליקציות מותקנות', 'Insufficient privileges': 'Insufficient privileges', 'Interaction at %s line %s': 'Interaction at %s line %s', 'Interactive console': 'Interactive console', 'internal error': 'שגיאה מובנית', 'internal error: %s': 'internal error: %s', 'Internal State': 'מצב מובנה', 'Invalid action': 'הוראה לא קיימת', 'Invalid application name': 'Invalid application name', 'invalid circular reference': 'invalid circular reference', 'Invalid email': 'Invalid email', 'Invalid git repository specified.': 'Invalid git repository specified.', 'Invalid key': 'Invalid key', 'Invalid login': 'Invalid login', 'invalid password': 'סיסמא שגויה', 'Invalid password': 'Invalid password', 'invalid password.': 'invalid password.', 'Invalid Query': 'שאילתה לא תקינה', 'invalid request': 'בקשה לא תקינה', 'Invalid request': 'Invalid request', 'Invalid reset password': 'Invalid reset password', 'invalid table names (auth_* tables already defined)': 'invalid table names (auth_* tables already defined)', 'invalid ticket': 'דו"ח שגיאה לא קיים', 'Invalid user': 'Invalid user', 'Invalid username': 'Invalid username', 'Invitation to join %(site)s': 'Invitation to join %(site)s', 'Key': 'Key', 'Key verified': 'Key verified', 'Keyboard shortcuts': 'Keyboard shortcuts', 'kill process': 'kill process', 'language file "%(filename)s" created/updated': 'קובץ השפה "%(filename)s" נוצר\\עודכן', 'Language files (static strings) updated': 'קובץ השפה (מחרוזות סטאטיות) עודכן', 'languages': 'שפות', 'Languages': 'שפות', 'Last name': 'Last name', 'Last Revision': 'Last Revision', 'Last saved on:': 'לאחרונה נשמר בתאריך:', 'License for': 'רשיון עבור', 'License:': 'License:', 'Line Nr': 'Line Nr', 'Line number': 'Line number', 'lists by exception': 'lists by exception', 'lists by ticket': 'lists by ticket', 'Loading...': 'Loading...', 'loading...': 'טוען...', 'Local Apps': 'Local Apps', 'locals': 'locals', 'Locals##debug': 'Locals##debug', 'Log In': 'Log In', 'Logged in': 'Logged in', 'Logged out': 'Logged out', 'Login': 'התחבר', 'login': 'התחבר', 'Login disabled by administrator': 'Login disabled by administrator', 'Login successful': 'Login successful', 'Login to the Administrative Interface': 'התחבר לממשק המנהל', 'Login/Register': 'Login/Register', 'Logout': 'התנתק', 'lost password': 'lost password', 'Main Menu': 'Main Menu', 'Manage': 'Manage', 'Manage %(action)s': 'Manage %(action)s', 'Manage Access Control': 'Manage Access Control', 'Manage Admin Users/Students': 'Manage Admin Users/Students', 'Manage Cache': 'Manage Cache', 'Manage Students': 'Manage Students', 'Memberships': 'Memberships', 'merge': 'מזג', 'Models': 'מבני נתונים', 'models': 'מבני נתונים', 'Modified On': 'Modified On', 'Modules': 'מודולים', 'modules': 'מודולים', 'Multi User Mode': 'Multi User Mode', 'Name': 'Name', 'new application "%s" created': 'האפליקציה "%s" נוצרה', 'new application "%s" imported': 'new application "%s" imported', 'New Application Wizard': 'New Application Wizard', 'New application wizard': 'New application wizard', 'New password': 'New password', 'new plugin installed': 'פלאגין חדש הותקן', 'New plugin installed: %s': 'New plugin installed: %s', 'New Record': 'רשומה חדשה', 'new record inserted': 'הרשומה נוספה', 'New simple application': 
'New simple application', 'next': 'next', 'next %s rows': 'next %s rows', 'next 100 rows': '100 הרשומות הבאות', 'NO': 'לא', 'no changes': 'no changes', 'No databases in this application': 'אין מסדי נתונים לאפליקציה זו', 'No Interaction yet': 'No Interaction yet', 'no match': 'לא נמצאה התאמה', 'no package selected': 'no package selected', 'no permission to uninstall "%s"': 'no permission to uninstall "%s"', 'Node:': 'Node:', 'Not Authorized': 'Not Authorized', 'Not supported': 'Not supported', 'Note: If you receive an error with github status code of 128, ensure the system and account you are deploying from has a cooresponding ssh key configured in the openshift account.': 'Note: If you receive an error with github status code of 128, ensure the system and account you are deploying from has a cooresponding ssh key configured in the openshift account.', 'Object or table name': 'Object or table name', 'Old password': 'Old password', "On production, you'll have to configure your webserver to use one process and multiple threads to use this debugger.": "On production, you'll have to configure your webserver to use one process and multiple threads to use this debugger.", 'Open new app in new window': 'Open new app in new window', 'OpenShift Deployment Interface': 'OpenShift Deployment Interface', 'OpenShift Output': 'OpenShift Output', 'or alternatively': 'or alternatively', 'Or Get from URL:': 'Or Get from URL:', 'or import from csv file': 'או יבא מקובץ csv', 'or provide app url:': 'או ספק כתובת url של אפליקציה', 'Origin': 'Origin', 'Original/Translation': 'מקור\\תרגום', 'Overview': 'Overview', 'Overwrite installed app': 'התקן על גבי אפלקציה מותקנת', 'Pack all': 'ארוז הכל', 'Pack compiled': 'ארוז מקומפל', 'Pack custom': 'Pack custom', 'pack plugin': 'ארוז תוסף', 'PAM authenticated user, cannot change password here': 'שינוי סיסמא באמצעות PAM אינו יכול להתבצע כאן', 'Password': 'Password', 'password changed': 'סיסמא שונתה', 'Password changed': 'Password changed', "Password fields don't match": "Password fields don't match", 'Password reset': 'Password reset', 'Password retrieve': 'Password retrieve', 'Past revisions': 'Past revisions', 'Path to appcfg.py': 'Path to appcfg.py', 'Path to local openshift repo root.': 'Path to local openshift repo root.', 'Peeking at file': 'מעיין בקובץ', 'Permission': 'Permission', 'Permissions': 'Permissions', 'Please': 'Please', 'please input your password again': 'please input your password again', 'Please wait, giving pythonanywhere a moment...': 'Please wait, giving pythonanywhere a moment...', 'plugin "%(plugin)s" deleted': 'תוסף "%(plugin)s" נמחק', 'Plugin "%s" in application': 'פלאגין "%s" של אפליקציה', 'plugin not specified': 'plugin not specified', 'Plugin page': 'Plugin page', 'plugins': 'plugins', 'Plugins': 'תוספים', 'Plural Form #%s': 'Plural Form #%s', 'Plural-Forms:': 'Plural-Forms:', 'Powered by': 'מופעל ע"י', 'Preferences saved correctly': 'Preferences saved correctly', 'Preferences saved on session only': 'Preferences saved on session only', 'previous %s rows': 'previous %s rows', 'previous 100 rows': '100 הרשומות הקודמות', 'Private files': 'Private files', 'private files': 'private files', 'Profile updated': 'Profile updated', 'Project Progress': 'Project Progress', 'Pull': 'Pull', 'Pull failed, certain files could not be checked out. Check logs for details.': 'Pull failed, certain files could not be checked out. Check logs for details.', 'Pull is not possible because you have unmerged files. 
Fix them up in the work tree, and then try again.': 'Pull is not possible because you have unmerged files. Fix them up in the work tree, and then try again.', 'Push': 'Push', 'Push failed, there are unmerged entries in the cache. Resolve merge issues manually and try again.': 'Push failed, there are unmerged entries in the cache. Resolve merge issues manually and try again.', 'pygraphviz library not found': 'pygraphviz library not found', 'PythonAnywhere Apps': 'PythonAnywhere Apps', 'PythonAnywhere Password': 'PythonAnywhere Password', 'Query:': 'שאילתה:', 'RAM': 'RAM', 'RAM Cache Keys': 'RAM Cache Keys', 'Ram Cleared': 'Ram Cleared', 'Rapid Search': 'Rapid Search', 'Record': 'Record', 'record': 'רשומה', 'Record %(id)s created': 'Record %(id)s created', 'Record %(id)s deleted': 'Record %(id)s deleted', 'Record %(id)s read': 'Record %(id)s read', 'Record %(id)s updated': 'Record %(id)s updated', 'Record Created': 'Record Created', 'Record Deleted': 'Record Deleted', 'record does not exist': 'הרשומה אינה קיימת', 'record id': 'מזהה רשומה', 'Record id': 'Record id', 'Record ID': 'Record ID', 'Record Updated': 'Record Updated', 'refresh': 'refresh', 'register': 'register', 'Registration identifier': 'Registration identifier', 'Registration is pending approval': 'Registration is pending approval', 'Registration key': 'Registration key', 'Registration needs verification': 'Registration needs verification', 'Registration successful': 'Registration successful', 'Reload routes': 'Reload routes', 'Remember me (for 30 days)': 'Remember me (for 30 days)', 'Remove compiled': 'הסר מקומפל', 'Removed Breakpoint on %s at line %s': 'Removed Breakpoint on %s at line %s', 'Replace': 'Replace', 'Replace All': 'Replace All', 'Repository (%s)': 'Repository (%s)', 'request': 'request', 'Request reset password': 'Request reset password', 'requires distutils, but not installed': 'requires distutils, but not installed', 'requires python-git, but not installed': 'requires python-git, but not installed', 'Reset Password key': 'Reset Password key', 'Resolve Conflict file': 'הסר קובץ היוצר קונפליקט', 'response': 'response', 'restart': 'restart', 'restore': 'שחזר', 'return': 'return', 'Revert': 'Revert', 'revert': 'חזור לגירסא קודמת', 'reverted to revision %s': 'reverted to revision %s', 'Revision %s': 'Revision %s', 'Revision:': 'Revision:', 'Role': 'Role', 'Roles': 'Roles', 'Rows in Table': 'Rows in Table', 'Rows in table': 'רשומות בטבלה', 'Rows selected': 'רשומות נבחרו', 'rules are not defined': 'rules are not defined', 'Run tests': 'Run tests', 'Run tests in this file': 'Run tests in this file', "Run tests in this file (to run all files, you may also use the button labelled 'test')": "Run tests in this file (to run all files, you may also use the button labelled 'test')", 'Running on %s': 'Running on %s', 'Save': 'Save', 'Save file:': 'Save file:', 'Save file: %s': 'Save file: %s', 'Save model as...': 'Save model as...', 'Save via Ajax': 'Save via Ajax', 'Saved file hash:': 'גיבוב הקובץ השמור:', 'Screenshot %s': 'Screenshot %s', 'Search': 'Search', 'Select Files to Package': 'Select Files to Package', 'selected': 'נבחרו', 'session': 'session', 'session expired': 'תם הסשן', 'Session saved correctly': 'Session saved correctly', 'Session saved on session only': 'Session saved on session only', 'Set Breakpoint on %s at line %s: %s': 'Set Breakpoint on %s at line %s: %s', 'shell': 'שורת פקודה', 'Showing %s to %s of %s %s found': 'Showing %s to %s of %s %s found', 'Sign Up': 'Sign Up', 'Singular Form': 'Singular Form', 
'Site': 'אתר', 'Size of cache:': 'Size of cache:', 'skip to generate': 'skip to generate', 'some files could not be removed': 'לא ניתן היה להסיר חלק מהקבצים', 'Something went wrong please wait a few minutes before retrying': 'Something went wrong please wait a few minutes before retrying', 'Sorry, could not find mercurial installed': 'Sorry, could not find mercurial installed', 'source : db': 'source : db', 'source : filesystem': 'source : filesystem', 'Start a new app': 'Start a new app', 'Start searching': 'Start searching', 'Start wizard': 'start wizard', 'state': 'מצב', 'Static': 'Static', 'static': 'קבצים סטאטיים', 'Static files': 'קבצים סטאטיים', 'Statistics': 'Statistics', 'Step': 'Step', 'step': 'step', 'stop': 'stop', 'submit': 'שלח', 'Submit': 'Submit', 'successful': 'successful', 'Sure you want to delete this object?': 'האם אתה בטוח שברצונך למחוק אובייקט זה?', 'switch to : db': 'switch to : db', 'switch to : filesystem': 'switch to : filesystem', 'Tab width (# characters)': 'Tab width (# characters)', 'table': 'טבלה', 'Table': 'Table', 'Temporary': 'Temporary', 'test': 'בדיקות', 'Testing application': 'בודק את האפליקציה', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"שאליתה" היא תנאי כגון "db1.table1.filed1=\'value\'" ביטוי כמו db.table1.field1=db.table2.field1 יחולל join', 'The app exists, was created by wizard, continue to overwrite!': 'The app exists, was created by wizard, continue to overwrite!', 'The app exists, was NOT created by wizard, continue to overwrite!': 'The app exists, was NOT created by wizard, continue to overwrite!', 'the application logic, each URL path is mapped in one exposed function in the controller': 'הלוגיקה של האפליקציה, כל url ממופה לפונקציה חשופה בבקר', 'The application logic, each URL path is mapped in one exposed function in the controller': 'The application logic, each URL path is mapped in one exposed function in the controller', 'the data representation, define database tables and sets': 'ייצוג המידע, בו מוגדרים טבלאות ומבנים', 'The data representation, define database tables and sets': 'The data representation, define database tables and sets', 'The presentations layer, views are also known as templates': 'The presentations layer, views are also known as templates', 'the presentations layer, views are also known as templates': 'שכבת התצוגה, המכונה גם template', 'Theme': 'Theme', 'There are no controllers': 'אין בקרים', 'There are no models': 'אין מבני נתונים', 'There are no modules': 'אין מודולים', 'There are no plugins': 'There are no plugins', 'There are no private files': 'There are no private files', 'There are no static files': 'אין קבצים סטאטיים', 'There are no translators': 'There are no translators', 'There are no translators, only default language is supported': 'אין תרגומים. רק שפת ברירת המחדל נתמכת', 'There are no views': 'אין קבצי תצוגה', 'These files are not served, they are only available from within your app': 'These files are not served, they are only available from within your app', 'These files are served without processing, your images go here': 'These files are served without processing, your images go here', 'these files are served without processing, your images go here': 'אלו הם קבצים הנשלחים מהשרת ללא עיבוד. 
הכנס את התמונות כאן', 'This code was emailed to you and is required for login.': 'This code was emailed to you and is required for login.', "This debugger may not work properly if you don't have a threaded webserver or you're using multiple daemon processes.": "This debugger may not work properly if you don't have a threaded webserver or you're using multiple daemon processes.", 'This email already has an account': 'This email already has an account', 'This is an experimental feature and it needs more testing. If you decide to downgrade you do it at your own risk': 'This is an experimental feature and it needs more testing. If you decide to downgrade you do it at your own risk', 'This is an experimental feature and it needs more testing. If you decide to upgrade you do it at your own risk': 'This is an experimental feature and it needs more testing. If you decide to upgrade you do it at your own risk', 'This is the %(filename)s template': 'זוהי תבנית הקובץ %(filename)s ', "This page can commit your changes to an openshift app repo and push them to your cloud instance. This assumes that you've already created the application instance using the web2py skeleton and have that repo somewhere on a filesystem that this web2py instance can access. This functionality requires GitPython installed and on the python path of the runtime that web2py is operating in.": "This page can commit your changes to an openshift app repo and push them to your cloud instance. This assumes that you've already created the application instance using the web2py skeleton and have that repo somewhere on a filesystem that this web2py instance can access. This functionality requires GitPython installed and on the python path of the runtime that web2py is operating in.", 'This page can upload your application to the Google App Engine computing cloud. Mind that you must first create indexes locally and this is done by installing the Google appserver and running the app locally with it once, or there will be errors when selecting records. Attention: deployment may take long time, depending on the network speed. Attention: it will overwrite your app.yaml. DO NOT SUBMIT TWICE.': 'This page can upload your application to the Google App Engine computing cloud. Mind that you must first create indexes locally and this is done by installing the Google appserver and running the app locally with it once, or there will be errors when selecting records. Attention: deployment may take long time, depending on the network speed. Attention: it will overwrite your app.yaml. 
DO NOT SUBMIT TWICE.', 'this page to see if a breakpoint was hit and debug interaction is required.': 'this page to see if a breakpoint was hit and debug interaction is required.', 'This will pull changes from the remote repo for application "%s"?': 'This will pull changes from the remote repo for application "%s"?', 'This will push changes to the remote repo for application "%s".': 'This will push changes to the remote repo for application "%s".', 'Ticket': 'דו"ח שגיאה', 'Ticket ID': 'Ticket ID', 'Ticket Missing': 'Ticket Missing', 'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)', 'Timestamp': 'Timestamp', 'TM': 'סימן רשום', 'to previous version.': 'אין גירסא קודמת', 'To create a plugin, name a file/folder plugin_[name]': 'כדי ליצור תוסף, קרא לקובץ או סיפריה בשם לפי התבנית plugin_[name]', 'To emulate a breakpoint programatically, write:': 'To emulate a breakpoint programatically, write:', 'to use the debugger!': 'to use the debugger!', 'toggle breakpoint': 'toggle breakpoint', 'Toggle comment': 'Toggle comment', 'Toggle Fullscreen': 'Toggle Fullscreen', 'Traceback': 'Traceback', 'translation strings for the application': 'מחרוזות תרגום עבור האפליקציה', 'Translation strings for the application': 'Translation strings for the application', 'try': 'נסה', 'try something like': 'נסה משהו כמו', 'Try the mobile interface': 'Try the mobile interface', 'try view': 'try view', 'Two-step Login Authentication Code': 'Two-step Login Authentication Code', 'Type PDB debugger command in here and hit Return (Enter) to execute it.': 'Type PDB debugger command in here and hit Return (Enter) to execute it.', 'Type some Python code in here and hit Return (Enter) to execute it.': 'Type some Python code in here and hit Return (Enter) to execute it.', 'Unable to check for upgrades': 'לא ניתן היה לבדוק אם יש שדרוגים', 'unable to create application "%s"': 'נכשל ביצירת האפליקציה "%s"', 'unable to delete file "%(filename)s"': 'נכשל במחיקת הקובץ "%(filename)s"', 'unable to delete file plugin "%(plugin)s"': 'נכשל במחיקת התוסף "%(plugin)s"', 'Unable to determine the line number!': 'Unable to determine the line number!', 'Unable to download app because:': 'לא ניתן היה להוריד את האפליקציה כי:', 'Unable to download because': 'לא הצלחתי להוריד כי', 'unable to download layout': 'unable to download layout', 'unable to download plugin: %s': 'unable to download plugin: %s', 'Unable to download the list of plugins': 'Unable to download the list of plugins', 'unable to install plugin "%s"': 'unable to install plugin "%s"', 'unable to parse csv file': 'לא הצלחתי לנתח את הקלט של קובץ csv', 'Unable to send email': 'Unable to send email', 'unable to uninstall "%s"': 'לא ניתן להסיר את "%s"', 'unable to upgrade because "%s"': 'לא ניתן היה לשדרג כי "%s"', 'uncheck all': 'הסר סימון מהכל', 'Uninstall': 'הסר התקנה', 'Unsupported webserver working mode: %s': 'Unsupported webserver working mode: %s', 'update': 'עדכן', 'update all languages': 'עדכן את כלל קיבצי השפה', 'Update:': 'עדכן:', 'Upgrade': 'Upgrade', 'upgrade now': 'upgrade now', 'upgrade now to %s': 'upgrade now to %s', 'upgrade web2py now': 'שדרג את web2py עכשיו', 'upload': 'upload', 'Upload': 'Upload', 'Upload & install packed application': 'העלה והתקן אפליקציה ארוזה', 'Upload a package:': 'Upload a package:', 'Upload and install packed application': 'Upload and install packed application', 'upload application:': 'העלה אפליקציה:', 'upload file:': 'העלה קובץ:', 'upload plugin file:': 'העלה קובץ תוסף:', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) 
for NOT to build more complex queries.': 'השתמש ב (...)&(...) עבור תנאי AND, (...)|(...) עבור תנאי OR ו~(...) עבור תנאי NOT ליצירת שאילתות מורכבות', 'Use an url:': 'Use an url:', 'User': 'User', 'User %(id)s is impersonating %(other_id)s': 'User %(id)s is impersonating %(other_id)s', 'User %(id)s Logged-in': 'User %(id)s Logged-in', 'User %(id)s Logged-out': 'User %(id)s Logged-out', 'User %(id)s Password changed': 'User %(id)s Password changed', 'User %(id)s Password reset': 'User %(id)s Password reset', 'User %(id)s Password retrieved': 'User %(id)s Password retrieved', 'User %(id)s Profile updated': 'User %(id)s Profile updated', 'User %(id)s Registered': 'User %(id)s Registered', 'User %(id)s Username retrieved': 'User %(id)s Username retrieved', 'User %(id)s Verification email sent': 'User %(id)s Verification email sent', 'User %(id)s verified registration key': 'User %(id)s verified registration key', 'User ID': 'User ID', 'Username': 'Username', 'Username already taken': 'Username already taken', 'Username retrieve': 'Username retrieve', 'Users': 'Users', 'Using the shell may lock the database to other users of this app.': 'Using the shell may lock the database to other users of this app.', 'variables': 'משתנים', 'Verify Password': 'Verify Password', 'Version': 'גירסא', 'Versioning': 'Versioning', 'versioning': 'מנגנון גירסאות', 'view': 'הצג', 'Views': 'מראה', 'views': 'מראה', 'Warning!': 'Warning!', 'WARNING:': 'WARNING:', 'WARNING: The following views could not be compiled:': 'WARNING: The following views could not be compiled:', 'Web Framework': 'Web Framework', 'web2py Admin Password': 'web2py Admin Password', 'web2py apps to deploy': 'web2py apps to deploy', 'web2py Debugger': 'web2py Debugger', 'web2py downgrade': 'web2py downgrade', 'web2py is up to date': 'web2py מותקנת בגירסתה האחרונה', 'web2py online debugger': 'web2py online debugger', 'web2py Recent Tweets': 'ציוצים אחרונים של web2py', 'web2py upgrade': 'web2py upgrade', 'web2py upgraded; please restart it': 'web2py שודרגה; נא אתחל אותה', 'Welcome %(username)s! Click on the link %(link)s to verify your email': 'Welcome %(username)s! Click on the link %(link)s to verify your email', 'Working...': 'Working...', 'WSGI reference name': 'WSGI reference name', 'YES': 'כן', 'Yes': 'Yes', 'You can also set and remove breakpoint in the edit window, using the Toggle Breakpoint button': 'You can also set and remove breakpoint in the edit window, using the Toggle Breakpoint button', 'You can inspect variables using the console below': 'You can inspect variables using the console below', 'You have been invited to join %(site)s, click %(link)s to complete the process': 'You have been invited to join %(site)s, click %(link)s to complete the process', 'You have one more login attempt before you are locked out': 'You have one more login attempt before you are locked out', 'You need to set up and reach a': 'You need to set up and reach a', 'You only need these if you have already registered': 'You only need these if you have already registered', 'Your application will be blocked until you click an action button (next, step, continue, etc.)': 'Your application will be blocked until you click an action button (next, step, continue, etc.)', 'Your password is: %(password)s': 'Your password is: %(password)s', 'Your temporary login code is {0}': 'Your temporary login code is {0}', 'Your username is: %(username)s': 'Your username is: %(username)s', 'Your username was emailed to you': 'Your username was emailed to you', }
true
true
f72467ab96456a59cb16d087533c917c2a6562da
3,363
py
Python
google/ads/google_ads/v1/proto/services/asset_service_pb2_grpc.py
jiulongw/google-ads-python
6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e
[ "Apache-2.0" ]
1
2019-11-30T23:42:39.000Z
2019-11-30T23:42:39.000Z
google/ads/google_ads/v1/proto/services/asset_service_pb2_grpc.py
jiulongw/google-ads-python
6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e
[ "Apache-2.0" ]
null
null
null
google/ads/google_ads/v1/proto/services/asset_service_pb2_grpc.py
jiulongw/google-ads-python
6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e
[ "Apache-2.0" ]
1
2020-09-30T17:04:06.000Z
2020-09-30T17:04:06.000Z
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! import grpc from google.ads.google_ads.v1.proto.resources import asset_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_asset__pb2 from google.ads.google_ads.v1.proto.services import asset_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2 class AssetServiceStub(object): """Proto file describing the Asset service. Service to manage assets. Asset types can be created with AssetService are YoutubeVideoAsset, MediaBundleAsset and ImageAsset. TextAsset should be created with Ad inline. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.GetAsset = channel.unary_unary( '/google.ads.googleads.v1.services.AssetService/GetAsset', request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2.GetAssetRequest.SerializeToString, response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_asset__pb2.Asset.FromString, ) self.MutateAssets = channel.unary_unary( '/google.ads.googleads.v1.services.AssetService/MutateAssets', request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2.MutateAssetsRequest.SerializeToString, response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2.MutateAssetsResponse.FromString, ) class AssetServiceServicer(object): """Proto file describing the Asset service. Service to manage assets. Asset types can be created with AssetService are YoutubeVideoAsset, MediaBundleAsset and ImageAsset. TextAsset should be created with Ad inline. """ def GetAsset(self, request, context): """Returns the requested asset in full detail. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def MutateAssets(self, request, context): """Creates assets. Operation statuses are returned. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_AssetServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'GetAsset': grpc.unary_unary_rpc_method_handler( servicer.GetAsset, request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2.GetAssetRequest.FromString, response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_asset__pb2.Asset.SerializeToString, ), 'MutateAssets': grpc.unary_unary_rpc_method_handler( servicer.MutateAssets, request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2.MutateAssetsRequest.FromString, response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2.MutateAssetsResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'google.ads.googleads.v1.services.AssetService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,))
46.068493
152
0.803152
import grpc from google.ads.google_ads.v1.proto.resources import asset_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_asset__pb2 from google.ads.google_ads.v1.proto.services import asset_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2 class AssetServiceStub(object): def __init__(self, channel): self.GetAsset = channel.unary_unary( '/google.ads.googleads.v1.services.AssetService/GetAsset', request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2.GetAssetRequest.SerializeToString, response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_asset__pb2.Asset.FromString, ) self.MutateAssets = channel.unary_unary( '/google.ads.googleads.v1.services.AssetService/MutateAssets', request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2.MutateAssetsRequest.SerializeToString, response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2.MutateAssetsResponse.FromString, ) class AssetServiceServicer(object): def GetAsset(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def MutateAssets(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_AssetServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'GetAsset': grpc.unary_unary_rpc_method_handler( servicer.GetAsset, request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2.GetAssetRequest.FromString, response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_asset__pb2.Asset.SerializeToString, ), 'MutateAssets': grpc.unary_unary_rpc_method_handler( servicer.MutateAssets, request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2.MutateAssetsRequest.FromString, response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_asset__service__pb2.MutateAssetsResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'google.ads.googleads.v1.services.AssetService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,))
true
true
f72468636003b664ce87050e414e28fea873cd2a
6,582
py
Python
uhd_restpy/testplatform/sessions/ixnetwork/impairment/link/link.py
OpenIxia/ixnetwork_restpy
f628db450573a104f327cf3c737ca25586e067ae
[ "MIT" ]
20
2019-05-07T01:59:14.000Z
2022-02-11T05:24:47.000Z
uhd_restpy/testplatform/sessions/ixnetwork/impairment/link/link.py
OpenIxia/ixnetwork_restpy
f628db450573a104f327cf3c737ca25586e067ae
[ "MIT" ]
60
2019-04-03T18:59:35.000Z
2022-02-22T12:05:05.000Z
uhd_restpy/testplatform/sessions/ixnetwork/impairment/link/link.py
OpenIxia/ixnetwork_restpy
f628db450573a104f327cf3c737ca25586e067ae
[ "MIT" ]
13
2019-05-20T10:48:31.000Z
2021-10-06T07:45:44.000Z
# MIT LICENSE # # Copyright 1997 - 2020 by IXIA Keysight # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from uhd_restpy.base import Base from uhd_restpy.files import Files from typing import List, Any, Union class Link(Base): """List of impairment links. Each link consists of a pair of ports. The Link class encapsulates a list of link resources that are managed by the system. A list of resources can be retrieved from the server using the Link.find() method. """ __slots__ = () _SDM_NAME = 'link' _SDM_ATT_MAP = { 'ForwardingInterruption': 'forwardingInterruption', 'Name': 'name', 'RxPortName': 'rxPortName', 'TxPortName': 'txPortName', } _SDM_ENUM_MAP = { } def __init__(self, parent, list_op=False): super(Link, self).__init__(parent, list_op) @property def LosLof(self): """ Returns ------- - obj(uhd_restpy.testplatform.sessions.ixnetwork.impairment.link.loslof.loslof.LosLof): An instance of the LosLof class Raises ------ - ServerError: The server has encountered an uncategorized error condition """ from uhd_restpy.testplatform.sessions.ixnetwork.impairment.link.loslof.loslof import LosLof if self._properties.get('LosLof', None) is not None: return self._properties.get('LosLof') else: return LosLof(self)._select() @property def ForwardingInterruption(self): # type: () -> bool """ Returns ------- - bool: Emulate a link fault. Drop all packets received. """ return self._get_attribute(self._SDM_ATT_MAP['ForwardingInterruption']) @ForwardingInterruption.setter def ForwardingInterruption(self, value): # type: (bool) -> None self._set_attribute(self._SDM_ATT_MAP['ForwardingInterruption'], value) @property def Name(self): # type: () -> str """ Returns ------- - str: The name of the link: receiving port -> transmitting port. """ return self._get_attribute(self._SDM_ATT_MAP['Name']) @property def RxPortName(self): # type: () -> str """ Returns ------- - str: The name of the receiving port. """ return self._get_attribute(self._SDM_ATT_MAP['RxPortName']) @property def TxPortName(self): # type: () -> str """ Returns ------- - str: The name of the transmitting port. """ return self._get_attribute(self._SDM_ATT_MAP['TxPortName']) def update(self, ForwardingInterruption=None): # type: (bool) -> Link """Updates link resource on the server. Args ---- - ForwardingInterruption (bool): Emulate a link fault. Drop all packets received. 
Raises ------ - ServerError: The server has encountered an uncategorized error condition """ return self._update(self._map_locals(self._SDM_ATT_MAP, locals())) def add(self, ForwardingInterruption=None): # type: (bool) -> Link """Adds a new link resource on the json, only valid with config assistant Args ---- - ForwardingInterruption (bool): Emulate a link fault. Drop all packets received. Returns ------- - self: This instance with all currently retrieved link resources using find and the newly added link resources available through an iterator or index Raises ------ - Exception: if this function is not being used with config assistance """ return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals())) def find(self, ForwardingInterruption=None, Name=None, RxPortName=None, TxPortName=None): # type: (bool, str, str, str) -> Link """Finds and retrieves link resources from the server. All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve link resources from the server. To retrieve an exact match ensure the parameter value starts with ^ and ends with $ By default the find method takes no parameters and will retrieve all link resources from the server. Args ---- - ForwardingInterruption (bool): Emulate a link fault. Drop all packets received. - Name (str): The name of the link: receiving port -> transmitting port. - RxPortName (str): The name of the receiving port. - TxPortName (str): The name of the transmitting port. Returns ------- - self: This instance with matching link resources retrieved from the server available through an iterator or index Raises ------ - ServerError: The server has encountered an uncategorized error condition """ return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) def read(self, href): """Retrieves a single instance of link data from the server. Args ---- - href (str): An href to the instance to be retrieved Returns ------- - self: This instance with the link resources from the server available through an iterator or index Raises ------ - NotFoundError: The requested resource does not exist on the server - ServerError: The server has encountered an uncategorized error condition """ return self._read(href)
36.164835
158
0.647068
from uhd_restpy.base import Base from uhd_restpy.files import Files from typing import List, Any, Union class Link(Base): __slots__ = () _SDM_NAME = 'link' _SDM_ATT_MAP = { 'ForwardingInterruption': 'forwardingInterruption', 'Name': 'name', 'RxPortName': 'rxPortName', 'TxPortName': 'txPortName', } _SDM_ENUM_MAP = { } def __init__(self, parent, list_op=False): super(Link, self).__init__(parent, list_op) @property def LosLof(self): from uhd_restpy.testplatform.sessions.ixnetwork.impairment.link.loslof.loslof import LosLof if self._properties.get('LosLof', None) is not None: return self._properties.get('LosLof') else: return LosLof(self)._select() @property def ForwardingInterruption(self): return self._get_attribute(self._SDM_ATT_MAP['ForwardingInterruption']) @ForwardingInterruption.setter def ForwardingInterruption(self, value): self._set_attribute(self._SDM_ATT_MAP['ForwardingInterruption'], value) @property def Name(self): return self._get_attribute(self._SDM_ATT_MAP['Name']) @property def RxPortName(self): return self._get_attribute(self._SDM_ATT_MAP['RxPortName']) @property def TxPortName(self): return self._get_attribute(self._SDM_ATT_MAP['TxPortName']) def update(self, ForwardingInterruption=None): return self._update(self._map_locals(self._SDM_ATT_MAP, locals())) def add(self, ForwardingInterruption=None): return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals())) def find(self, ForwardingInterruption=None, Name=None, RxPortName=None, TxPortName=None): return self._select(self._map_locals(self._SDM_ATT_MAP, locals())) def read(self, href): return self._read(href)
true
true
f724688f8543cbeb74c6f94451f45052c8b2c1af
11,189
py
Python
plugins/modules/oracle_profile.py
blaf-cgi/ansible-oracle-modules
37905c6ad91808a96f0085c9c1069e166f2e17b4
[ "MIT" ]
8
2020-08-11T04:21:24.000Z
2021-12-03T16:21:56.000Z
plugins/modules/oracle_profile.py
blaf-cgi/ansible-oracle-modules
37905c6ad91808a96f0085c9c1069e166f2e17b4
[ "MIT" ]
4
2021-03-13T09:09:28.000Z
2022-01-07T12:38:02.000Z
plugins/modules/oracle_profile.py
blaf-cgi/ansible-oracle-modules
37905c6ad91808a96f0085c9c1069e166f2e17b4
[ "MIT" ]
3
2021-03-16T13:48:57.000Z
2022-03-02T10:43:47.000Z
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = ''' --- module: oracle_profile short_description: Manage profiles in an Oracle database description: - Manage profiles in an Oracle database version_added: "0.8.0" options: name: description: - The name of the profile required: true default: None aliases: ['profile'] state: description: - The intended state of the profile. default: present choices: ['present','absent'] attribute_name: description: - The attribute name (e.g PASSWORD_REUSE_TIME) default: None aliases: ['an'] attribute_value: description: - The attribute value (e.g 10) default: None aliases: ['av'] username: description: - The DB username required: false default: sys aliases: ['un'] password: description: - The password for the DB user required: false default: None aliases: ['pw'] service_name: description: - The profile_name to connect to the database. required: false aliases: ['sn'] hostname: description: - The host of the database if using dbms_profile required: false default: localhost aliases: ['host'] port: description: - The listener port to connect to the database if using dbms_profile required: false default: 1521 oracle_home: description: - The GI ORACLE_HOME required: false default: None aliases: ['oh'] notes: - cx_Oracle needs to be installed requirements: [ "cx_Oracle" ] author: Mikael Sandström, oravirt@gmail.com, @oravirt ''' EXAMPLES = ''' # Create a profile - hosts: dbserver vars: oracle_home: /u01/app/oracle/12.2.0.1/db1 hostname: "{{ inventory_hostname }}" service_name: orclpdb user: system password: Oracle_123 oracle_env: ORACLE_HOME: "{{ oracle_home }}" LD_LIBRARY_PATH: "{{ oracle_home }}/lib" profiles: - name: profile1 attribute_name: - password_reuse_max - password_reuse_time - sessions_per_user attribute_value: - 6 - 20 - 5 state: present tasks: - name: Manage profiles oracle_profile: name={{ item.name }} attribute_name={{ item.attribute_name}} attribute_value={{ item.attribute_value}} state={{ item.state }} hostname={{ hostname }} service_name={{ service_name }} user={{ user }} password={{ password }} environment: "{{oracle_env}}" with_items: "{{ profiles }}" ''' try: import cx_Oracle except ImportError: cx_oracle_exists = False else: cx_oracle_exists = True # Check if the profile exists def check_profile_exists(cursor, module, msg, name): sql = 'select count(*) from dba_profiles where lower (profile) = \'%s\'' % (name.lower()) result = execute_sql_get(module, msg, cursor, sql) if result[0][0] > 0: return True else: return False def create_profile(cursor, module, msg, oracle_home, name, attribute_name, attribute_value): add_attr = False if not any(x == 'None' for x in attribute_name): add_attr = True if not any(x is None for x in attribute_name): add_attr = True if add_attr: attributes = ' '.join(['' + str(n) + ' ' + str(v) + '' for n, v in zip(attribute_name, attribute_value)]) sql = 'create profile %s limit ' % name if add_attr: sql += ' %s' % (attributes.lower()) if execute_sql(module, msg, cursor, sql): return True else: return False def remove_profile(cursor, module, msg, oracle_home, name): dropsql = 'drop profile %s' % name if execute_sql(module, msg, cursor, dropsql): return True else: return False def ensure_profile_state(cursor, module, msg, name, state, attribute_name, attribute_value): # pass total_sql = [] profile_sql = 'alter profile %s ' % (name.upper()) # Deal with attribute differences if attribute_name and attribute_value: # Make sure 
attributes are lower case attribute_name = [x.lower() for x in attribute_name] attribute_value = [str(y).lower() for y in attribute_value] wanted_attributes = zip(attribute_name, attribute_value) # Check the current attributes attribute_names_ = ','.join(['\'' + n[0] + '\'' for n in wanted_attributes]) if len(attribute_names_) != 0: current_attributes = get_current_attributes(cursor, module, msg, name, attribute_names_) # Convert to dict and compare current with wanted if dict(current_attributes) != dict(wanted_attributes): for i in wanted_attributes: total_sql.append("alter profile %s limit %s %s " % (name, i[0], i[1])) # module.exit_json(msg=total_sql, changed=True) if len(total_sql) > 0: if ensure_profile_state_sql(module, msg, cursor, total_sql): msg = 'profile %s has been put in the intended state' % name module.exit_json(msg=msg, changed=True) else: return False else: msg = 'Nothing to do' module.exit_json(msg=msg, changed=False) def ensure_profile_state_sql(module, msg, cursor, total_sql): for sql in total_sql: execute_sql(module, msg, cursor, sql) return True def get_current_attributes(cursor, module, msg, name, attribute_names_): sql = 'select lower(resource_name),lower(limit) ' sql += 'from dba_profiles ' sql += 'where lower(profile) = \'%s\' ' % (name.lower()) sql += 'and lower(resource_name) in (%s) ' % (attribute_names_.lower()) result = execute_sql_get(module, msg, cursor, sql) return result def execute_sql_get(module, msg, cursor, sql): try: cursor.execute(sql) result = (cursor.fetchall()) except cx_Oracle.DatabaseError as exc: error, = exc.args msg = 'Something went wrong while executing sql_get - %s sql: %s' % (error.message, sql) module.fail_json(msg=msg, changed=False) return False return result def execute_sql(module, msg, cursor, sql): try: cursor.execute(sql) except cx_Oracle.DatabaseError as exc: error, = exc.args msg = 'Something went wrong while executing sql - %s sql: %s' % (error.message, sql) module.fail_json(msg=msg, changed=False) return False return True def main(): msg = [''] cursor = None module = AnsibleModule( argument_spec=dict( name=dict(required=True, aliases=['profile']), attribute_name=dict(required=True, type='list', aliases=['an']), attribute_value=dict(required=True, type='list', aliases=['av']), state=dict(default="present", choices=["present", "absent"]), user=dict(required=False, aliases=['un', 'username']), password=dict(required=False, no_log=True, aliases=['pw']), mode=dict(default='normal', choices=["normal", "sysdba"]), hostname=dict(required=False, default='localhost', aliases=['host']), port=dict(required=False, default=1521), service_name=dict(required=False, aliases=['sn']), oracle_home=dict(required=False, aliases=['oh']), ), ) name = module.params["name"] attribute_name = module.params["attribute_name"] attribute_value = module.params["attribute_value"] state = module.params["state"] user = module.params["user"] password = module.params["password"] mode = module.params["mode"] hostname = module.params["hostname"] port = module.params["port"] service_name = module.params["service_name"] oracle_home = module.params["oracle_home"] if not cx_oracle_exists: msg = "The cx_Oracle module is required. 'pip install cx_Oracle' should do the trick. 
If cx_Oracle is installed, make sure ORACLE_HOME & LD_LIBRARY_PATH is set" module.fail_json(msg=msg) wallet_connect = '/@%s' % service_name try: if ( not user and not password): # If neither user or password is supplied, the use of an oracle wallet is assumed connect = wallet_connect if mode == 'sysdba': conn = cx_Oracle.connect(wallet_connect, mode=cx_Oracle.SYSDBA) else: conn = cx_Oracle.connect(wallet_connect) elif user and password: dsn = cx_Oracle.makedsn(host=hostname, port=port, service_name=service_name) connect = dsn if mode == 'sysdba': conn = cx_Oracle.connect(user, password, dsn, mode=cx_Oracle.SYSDBA) else: conn = cx_Oracle.connect(user, password, dsn) elif not user or not password: module.fail_json(msg='Missing username or password for cx_Oracle') except cx_Oracle.DatabaseError as exc: error, = exc.args msg = 'Could not connect to DB: %s, connect descriptor: %s, username: %s, pass: %s' % ( error.message, connect, user, password) module.fail_json(msg=msg, changed=False) cursor = conn.cursor() if oracle_home is not None: os.environ['ORACLE_HOME'] = oracle_home elif 'ORACLE_HOME' in os.environ: oracle_home = os.environ['ORACLE_HOME'] else: msg = 'ORACLE_HOME variable not set. Please set it and re-run the command' module.fail_json(msg=msg, changed=False) if state == 'present': if not check_profile_exists(cursor, module, msg, name): if create_profile(cursor, module, msg, oracle_home, name, attribute_name, attribute_value): msg = 'Successfully created profile %s ' % name module.exit_json(msg=msg, changed=True) else: module.fail_json(msg=msg, changed=False) else: ensure_profile_state(cursor, module, msg, name, state, attribute_name, attribute_value) elif state == 'absent': if check_profile_exists(cursor, module, msg, name): if remove_profile(cursor, module, msg, oracle_home, name): msg = 'Profile %s successfully removed' % name module.exit_json(msg=msg, changed=True) else: module.exit_json(msg=msg, changed=False) else: msg = 'Profile %s doesn\'t exist' % name module.exit_json(msg=msg, changed=False) module.exit_json(msg="Unhandled exit", changed=False) from ansible.module_utils.basic import AnsibleModule, os if __name__ == '__main__': main()
32.620991
168
0.602377
from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = ''' --- module: oracle_profile short_description: Manage profiles in an Oracle database description: - Manage profiles in an Oracle database version_added: "0.8.0" options: name: description: - The name of the profile required: true default: None aliases: ['profile'] state: description: - The intended state of the profile. default: present choices: ['present','absent'] attribute_name: description: - The attribute name (e.g PASSWORD_REUSE_TIME) default: None aliases: ['an'] attribute_value: description: - The attribute value (e.g 10) default: None aliases: ['av'] username: description: - The DB username required: false default: sys aliases: ['un'] password: description: - The password for the DB user required: false default: None aliases: ['pw'] service_name: description: - The profile_name to connect to the database. required: false aliases: ['sn'] hostname: description: - The host of the database if using dbms_profile required: false default: localhost aliases: ['host'] port: description: - The listener port to connect to the database if using dbms_profile required: false default: 1521 oracle_home: description: - The GI ORACLE_HOME required: false default: None aliases: ['oh'] notes: - cx_Oracle needs to be installed requirements: [ "cx_Oracle" ] author: Mikael Sandström, oravirt@gmail.com, @oravirt ''' EXAMPLES = ''' # Create a profile - hosts: dbserver vars: oracle_home: /u01/app/oracle/12.2.0.1/db1 hostname: "{{ inventory_hostname }}" service_name: orclpdb user: system password: Oracle_123 oracle_env: ORACLE_HOME: "{{ oracle_home }}" LD_LIBRARY_PATH: "{{ oracle_home }}/lib" profiles: - name: profile1 attribute_name: - password_reuse_max - password_reuse_time - sessions_per_user attribute_value: - 6 - 20 - 5 state: present tasks: - name: Manage profiles oracle_profile: name={{ item.name }} attribute_name={{ item.attribute_name}} attribute_value={{ item.attribute_value}} state={{ item.state }} hostname={{ hostname }} service_name={{ service_name }} user={{ user }} password={{ password }} environment: "{{oracle_env}}" with_items: "{{ profiles }}" ''' try: import cx_Oracle except ImportError: cx_oracle_exists = False else: cx_oracle_exists = True def check_profile_exists(cursor, module, msg, name): sql = 'select count(*) from dba_profiles where lower (profile) = \'%s\'' % (name.lower()) result = execute_sql_get(module, msg, cursor, sql) if result[0][0] > 0: return True else: return False def create_profile(cursor, module, msg, oracle_home, name, attribute_name, attribute_value): add_attr = False if not any(x == 'None' for x in attribute_name): add_attr = True if not any(x is None for x in attribute_name): add_attr = True if add_attr: attributes = ' '.join(['' + str(n) + ' ' + str(v) + '' for n, v in zip(attribute_name, attribute_value)]) sql = 'create profile %s limit ' % name if add_attr: sql += ' %s' % (attributes.lower()) if execute_sql(module, msg, cursor, sql): return True else: return False def remove_profile(cursor, module, msg, oracle_home, name): dropsql = 'drop profile %s' % name if execute_sql(module, msg, cursor, dropsql): return True else: return False def ensure_profile_state(cursor, module, msg, name, state, attribute_name, attribute_value): total_sql = [] profile_sql = 'alter profile %s ' % (name.upper()) if attribute_name and attribute_value: attribute_name = [x.lower() for x in attribute_name] attribute_value = [str(y).lower() for y in attribute_value] wanted_attributes = 
zip(attribute_name, attribute_value) attribute_names_ = ','.join(['\'' + n[0] + '\'' for n in wanted_attributes]) if len(attribute_names_) != 0: current_attributes = get_current_attributes(cursor, module, msg, name, attribute_names_) if dict(current_attributes) != dict(wanted_attributes): for i in wanted_attributes: total_sql.append("alter profile %s limit %s %s " % (name, i[0], i[1])) if len(total_sql) > 0: if ensure_profile_state_sql(module, msg, cursor, total_sql): msg = 'profile %s has been put in the intended state' % name module.exit_json(msg=msg, changed=True) else: return False else: msg = 'Nothing to do' module.exit_json(msg=msg, changed=False) def ensure_profile_state_sql(module, msg, cursor, total_sql): for sql in total_sql: execute_sql(module, msg, cursor, sql) return True def get_current_attributes(cursor, module, msg, name, attribute_names_): sql = 'select lower(resource_name),lower(limit) ' sql += 'from dba_profiles ' sql += 'where lower(profile) = \'%s\' ' % (name.lower()) sql += 'and lower(resource_name) in (%s) ' % (attribute_names_.lower()) result = execute_sql_get(module, msg, cursor, sql) return result def execute_sql_get(module, msg, cursor, sql): try: cursor.execute(sql) result = (cursor.fetchall()) except cx_Oracle.DatabaseError as exc: error, = exc.args msg = 'Something went wrong while executing sql_get - %s sql: %s' % (error.message, sql) module.fail_json(msg=msg, changed=False) return False return result def execute_sql(module, msg, cursor, sql): try: cursor.execute(sql) except cx_Oracle.DatabaseError as exc: error, = exc.args msg = 'Something went wrong while executing sql - %s sql: %s' % (error.message, sql) module.fail_json(msg=msg, changed=False) return False return True def main(): msg = [''] cursor = None module = AnsibleModule( argument_spec=dict( name=dict(required=True, aliases=['profile']), attribute_name=dict(required=True, type='list', aliases=['an']), attribute_value=dict(required=True, type='list', aliases=['av']), state=dict(default="present", choices=["present", "absent"]), user=dict(required=False, aliases=['un', 'username']), password=dict(required=False, no_log=True, aliases=['pw']), mode=dict(default='normal', choices=["normal", "sysdba"]), hostname=dict(required=False, default='localhost', aliases=['host']), port=dict(required=False, default=1521), service_name=dict(required=False, aliases=['sn']), oracle_home=dict(required=False, aliases=['oh']), ), ) name = module.params["name"] attribute_name = module.params["attribute_name"] attribute_value = module.params["attribute_value"] state = module.params["state"] user = module.params["user"] password = module.params["password"] mode = module.params["mode"] hostname = module.params["hostname"] port = module.params["port"] service_name = module.params["service_name"] oracle_home = module.params["oracle_home"] if not cx_oracle_exists: msg = "The cx_Oracle module is required. 'pip install cx_Oracle' should do the trick. 
If cx_Oracle is installed, make sure ORACLE_HOME & LD_LIBRARY_PATH is set" module.fail_json(msg=msg) wallet_connect = '/@%s' % service_name try: if ( not user and not password): connect = wallet_connect if mode == 'sysdba': conn = cx_Oracle.connect(wallet_connect, mode=cx_Oracle.SYSDBA) else: conn = cx_Oracle.connect(wallet_connect) elif user and password: dsn = cx_Oracle.makedsn(host=hostname, port=port, service_name=service_name) connect = dsn if mode == 'sysdba': conn = cx_Oracle.connect(user, password, dsn, mode=cx_Oracle.SYSDBA) else: conn = cx_Oracle.connect(user, password, dsn) elif not user or not password: module.fail_json(msg='Missing username or password for cx_Oracle') except cx_Oracle.DatabaseError as exc: error, = exc.args msg = 'Could not connect to DB: %s, connect descriptor: %s, username: %s, pass: %s' % ( error.message, connect, user, password) module.fail_json(msg=msg, changed=False) cursor = conn.cursor() if oracle_home is not None: os.environ['ORACLE_HOME'] = oracle_home elif 'ORACLE_HOME' in os.environ: oracle_home = os.environ['ORACLE_HOME'] else: msg = 'ORACLE_HOME variable not set. Please set it and re-run the command' module.fail_json(msg=msg, changed=False) if state == 'present': if not check_profile_exists(cursor, module, msg, name): if create_profile(cursor, module, msg, oracle_home, name, attribute_name, attribute_value): msg = 'Successfully created profile %s ' % name module.exit_json(msg=msg, changed=True) else: module.fail_json(msg=msg, changed=False) else: ensure_profile_state(cursor, module, msg, name, state, attribute_name, attribute_value) elif state == 'absent': if check_profile_exists(cursor, module, msg, name): if remove_profile(cursor, module, msg, oracle_home, name): msg = 'Profile %s successfully removed' % name module.exit_json(msg=msg, changed=True) else: module.exit_json(msg=msg, changed=False) else: msg = 'Profile %s doesn\'t exist' % name module.exit_json(msg=msg, changed=False) module.exit_json(msg="Unhandled exit", changed=False) from ansible.module_utils.basic import AnsibleModule, os if __name__ == '__main__': main()
true
true
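A note on the oracle_profile record above: in Python 3, zip() yields a single-use iterator, so the join that builds attribute_names_ exhausts wanted_attributes before the dict() comparison and the alter-profile loop ever see it. A minimal sketch of that comparison with the pairs materialised as a list first (the function and variable names here are illustrative, not part of the module):

def build_profile_sql(name, attribute_name, attribute_value, current_attributes):
    # Materialise the pairs once so they can be reused; zip() is single-use in Python 3.
    wanted = list(zip([a.lower() for a in attribute_name],
                      [str(v).lower() for v in attribute_value]))
    total_sql = []
    if dict(current_attributes) != dict(wanted):
        for attr, limit in wanted:
            total_sql.append("alter profile %s limit %s %s" % (name, attr, limit))
    return total_sql

# Example: limits read back from dba_profiles vs. the wanted limits
current = [("password_reuse_max", "10")]
print(build_profile_sql("profile1", ["PASSWORD_REUSE_MAX"], [6], current))
# ['alter profile profile1 limit password_reuse_max 6']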
f72468caade31cb02d08f32af7db7f38ca7ca5c2
1,858
py
Python
azure_iot_hub/azure/templates/led_matrix_esp32_iot_hub/__init__.py
codycodes/gix-mkrfridays-iot
8e41040114e58b55a07ecdef7c0f4a669a0b27dc
[ "MIT" ]
2
2020-05-17T05:04:22.000Z
2020-05-19T17:14:59.000Z
azure_iot_hub/azure/templates/led_matrix_esp32_iot_hub/__init__.py
codycodes/gix-mkrfridays-iot
8e41040114e58b55a07ecdef7c0f4a669a0b27dc
[ "MIT" ]
18
2020-05-13T22:51:50.000Z
2020-10-07T04:44:16.000Z
azure_iot_hub/azure/templates/led_matrix_esp32_iot_hub/__init__.py
codycodes/gix-mkrfridays-iot
8e41040114e58b55a07ecdef7c0f4a669a0b27dc
[ "MIT" ]
2
2020-04-24T21:25:42.000Z
2020-05-16T19:00:50.000Z
import logging import azure.functions as func from azure.iot.hub import IoTHubRegistryManager # Note that Azure Key Vault doesn't support underscores # and some other special chars; # we substitute with a hyphen for underscore CONNECTION_STRING = "{c2d connection string}" DEVICE_ID = "{device to invoke}" MESSAGE_COUNT = 1 def iothub_messaging_sample_run(msg): try: # IoTHubRegistryManager registry_manager = IoTHubRegistryManager(CONNECTION_STRING) for i in range(0, MESSAGE_COUNT): logging.info('Sending message: {0}'.format(i)) data = msg props = {} registry_manager.send_c2d_message( DEVICE_ID, data, properties=props) except Exception as ex: logging.info(f"Unexpected error {ex}") return def main(req: func.HttpRequest) -> func.HttpResponse: logging.info('Python HTTP trigger function processed a request.') msg = req.params.get('msg') if not msg: try: req_body = req.get_json() except ValueError: pass else: msg = req_body.get('msg') logging.info('***NOW EXECUTING C2D***') if msg: # TODO: this whitespace is to push some unicode chars off # the screen; can be removed later when Arduino code is # fixed iothub_messaging_sample_run(msg) return func.HttpResponse( f"Your text {msg} has been deployed to the" " device successfully!") else: return func.HttpResponse( "This HTTP triggered function executed successfully." " Pass a msg in the query string or in the request body" " for a personalized response.", status_code=200 )
32.034483
70
0.599569
import logging import azure.functions as func from azure.iot.hub import IoTHubRegistryManager # and some other special chars; # we substitute with a hyphen for underscore CONNECTION_STRING = "{c2d connection string}" DEVICE_ID = "{device to invoke}" MESSAGE_COUNT = 1 def iothub_messaging_sample_run(msg): try: # IoTHubRegistryManager registry_manager = IoTHubRegistryManager(CONNECTION_STRING) for i in range(0, MESSAGE_COUNT): logging.info('Sending message: {0}'.format(i)) data = msg props = {} registry_manager.send_c2d_message( DEVICE_ID, data, properties=props) except Exception as ex: logging.info(f"Unexpected error {ex}") return def main(req: func.HttpRequest) -> func.HttpResponse: logging.info('Python HTTP trigger function processed a request.') msg = req.params.get('msg') if not msg: try: req_body = req.get_json() except ValueError: pass else: msg = req_body.get('msg') logging.info('***NOW EXECUTING C2D***') if msg: # TODO: this whitespace is to push some unicode chars off # the screen; can be removed later when Arduino code is # fixed iothub_messaging_sample_run(msg) return func.HttpResponse( f"Your text {msg} has been deployed to the" " device successfully!") else: return func.HttpResponse( "This HTTP triggered function executed successfully." " Pass a msg in the query string or in the request body" " for a personalized response.", status_code=200 )
true
true
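The Azure Function in the record above forwards a msg value to a device as a cloud-to-device message, reading it either from the query string or from a JSON body. A small client-side sketch, assuming the default HTTP trigger bindings and a hypothetical function URL:

import requests

# Hypothetical endpoint; substitute the deployed function app URL (and key, if required).
FUNC_URL = "https://<function-app>.azurewebsites.net/api/led_matrix_esp32_iot_hub"

# Query-string form, matching req.params.get('msg')
print(requests.get(FUNC_URL, params={"msg": "hello matrix"}).text)

# JSON-body form, matching req.get_json().get('msg')
print(requests.post(FUNC_URL, json={"msg": "hello matrix"}).text)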
f72469575b1cfc70bf9f89f7c27364ebc94398cb
4,622
py
Python
images/orbit-controller/src/orbit_controller/webhooks/imagereplication_pod_webhook.py
srinivasreddych/aws-orbit-workbench
2d154addff58d26f5459a73c06148aaf5e9fad46
[ "Apache-2.0" ]
94
2021-03-19T19:55:11.000Z
2022-03-31T19:50:01.000Z
images/orbit-controller/src/orbit_controller/webhooks/imagereplication_pod_webhook.py
srinivasreddych/aws-orbit-workbench
2d154addff58d26f5459a73c06148aaf5e9fad46
[ "Apache-2.0" ]
410
2021-03-19T18:04:48.000Z
2022-03-22T13:56:53.000Z
images/orbit-controller/src/orbit_controller/webhooks/imagereplication_pod_webhook.py
srinivasreddych/aws-orbit-workbench
2d154addff58d26f5459a73c06148aaf5e9fad46
[ "Apache-2.0" ]
24
2021-03-19T23:16:23.000Z
2022-03-04T01:05:18.000Z
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from copy import deepcopy from typing import Any, Dict, List, Optional import kopf from orbit_controller import ORBIT_API_GROUP, ORBIT_API_VERSION, dynamic_client from orbit_controller.utils import imagereplication_utils CONFIG: Dict[str, Any] @kopf.on.startup() def configure(settings: kopf.OperatorSettings, logger: kopf.Logger, **_: Any) -> None: settings.admission.server = kopf.WebhookServer( cafile="/certs/ca.crt", certfile="/certs/tls.crt", pkeyfile="/certs/tls.key", port=443, ) settings.persistence.progress_storage = kopf.MultiProgressStorage( [ kopf.AnnotationsProgressStorage(prefix="orbit.aws"), kopf.StatusProgressStorage(field="status.orbit-aws"), ] ) settings.persistence.finalizer = "imagereplication-pod-webhook.orbit.aws/kopf-finalizer" settings.posting.level = logging.getLevelName(os.environ.get("EVENT_LOG_LEVEL", "INFO")) global CONFIG CONFIG = imagereplication_utils.get_config() logger.info("CONFIG: %s", CONFIG) def _check_replication_status(value: str, **_: Any) -> bool: return value not in ["Failed", "MaxAttemptsExceeded"] @kopf.index( # type: ignore ORBIT_API_GROUP, ORBIT_API_VERSION, "imagereplications", field="status.replication.replicationStatus", value=_check_replication_status, ) def imagereplications_idx(namespace: str, name: str, spec: kopf.Spec, status: kopf.Status, **_: Any) -> Dict[str, Any]: replication_status = status.get("replication", {}).get("replicationStatus", None) return { spec["destination"]: { "namespace": namespace, "name": name, "source": spec["source"], "replicationStatus": replication_status, } } @kopf.on.mutate("pods", id="update-pod-images") # type: ignore def update_pod_images( spec: kopf.Spec, patch: kopf.Patch, dryrun: bool, logger: kopf.Logger, imagereplications_idx: kopf.Index[str, str], **_: Any, ) -> kopf.Patch: if dryrun: logger.debug("DryRun - Skip Pod Mutation") return patch annotations = {} init_containers: List[Dict[str, Any]] = [] containers: List[Dict[str, Any]] = [] replications = {} def process_containers( src_containers: Optional[List[Dict[str, Any]]], dest_containers: List[Dict[str, Any]] ) -> None: for container in src_containers if src_containers else []: image = container.get("image", "") desired_image = imagereplication_utils.get_desired_image(image=image, config=CONFIG) if image != desired_image: container_copy = deepcopy(container) container_copy["image"] = desired_image dest_containers.append(container_copy) replications[image] = desired_image annotations[f"original-container-image~1{container['name']}"] = image process_containers(spec.get("initContainers", []), init_containers) process_containers(spec.get("containers", []), containers) if replications: client = dynamic_client() for source, destination in replications.items(): if not imagereplications_idx.get(destination, []): imagereplication_utils.create_imagereplication( namespace="orbit-system", source=source, destination=destination, 
client=client, logger=logger, ) else: logger.debug("Skipping ImageReplication Creation") if annotations: patch["metadata"] = {"annotations": annotations} patch["spec"] = {} if init_containers: patch["spec"]["initContainers"] = init_containers if containers: patch["spec"]["containers"] = containers logger.debug("Patch: %s", str(patch)) return patch
35.553846
119
0.650151
import logging import os from copy import deepcopy from typing import Any, Dict, List, Optional import kopf from orbit_controller import ORBIT_API_GROUP, ORBIT_API_VERSION, dynamic_client from orbit_controller.utils import imagereplication_utils CONFIG: Dict[str, Any] @kopf.on.startup() def configure(settings: kopf.OperatorSettings, logger: kopf.Logger, **_: Any) -> None: settings.admission.server = kopf.WebhookServer( cafile="/certs/ca.crt", certfile="/certs/tls.crt", pkeyfile="/certs/tls.key", port=443, ) settings.persistence.progress_storage = kopf.MultiProgressStorage( [ kopf.AnnotationsProgressStorage(prefix="orbit.aws"), kopf.StatusProgressStorage(field="status.orbit-aws"), ] ) settings.persistence.finalizer = "imagereplication-pod-webhook.orbit.aws/kopf-finalizer" settings.posting.level = logging.getLevelName(os.environ.get("EVENT_LOG_LEVEL", "INFO")) global CONFIG CONFIG = imagereplication_utils.get_config() logger.info("CONFIG: %s", CONFIG) def _check_replication_status(value: str, **_: Any) -> bool: return value not in ["Failed", "MaxAttemptsExceeded"] @kopf.index( ORBIT_API_GROUP, ORBIT_API_VERSION, "imagereplications", field="status.replication.replicationStatus", value=_check_replication_status, ) def imagereplications_idx(namespace: str, name: str, spec: kopf.Spec, status: kopf.Status, **_: Any) -> Dict[str, Any]: replication_status = status.get("replication", {}).get("replicationStatus", None) return { spec["destination"]: { "namespace": namespace, "name": name, "source": spec["source"], "replicationStatus": replication_status, } } @kopf.on.mutate("pods", id="update-pod-images") def update_pod_images( spec: kopf.Spec, patch: kopf.Patch, dryrun: bool, logger: kopf.Logger, imagereplications_idx: kopf.Index[str, str], **_: Any, ) -> kopf.Patch: if dryrun: logger.debug("DryRun - Skip Pod Mutation") return patch annotations = {} init_containers: List[Dict[str, Any]] = [] containers: List[Dict[str, Any]] = [] replications = {} def process_containers( src_containers: Optional[List[Dict[str, Any]]], dest_containers: List[Dict[str, Any]] ) -> None: for container in src_containers if src_containers else []: image = container.get("image", "") desired_image = imagereplication_utils.get_desired_image(image=image, config=CONFIG) if image != desired_image: container_copy = deepcopy(container) container_copy["image"] = desired_image dest_containers.append(container_copy) replications[image] = desired_image annotations[f"original-container-image~1{container['name']}"] = image process_containers(spec.get("initContainers", []), init_containers) process_containers(spec.get("containers", []), containers) if replications: client = dynamic_client() for source, destination in replications.items(): if not imagereplications_idx.get(destination, []): imagereplication_utils.create_imagereplication( namespace="orbit-system", source=source, destination=destination, client=client, logger=logger, ) else: logger.debug("Skipping ImageReplication Creation") if annotations: patch["metadata"] = {"annotations": annotations} patch["spec"] = {} if init_containers: patch["spec"]["initContainers"] = init_containers if containers: patch["spec"]["containers"] = containers logger.debug("Patch: %s", str(patch)) return patch
true
true
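The kopf webhook above patches pod specs so containers pull replicated images and then requests any missing replications. A stripped-down sketch of the image-rewriting step outside kopf (the mirror mapping is an assumption; the real lookup is imagereplication_utils.get_desired_image):

from copy import deepcopy

def rewrite_images(pod_spec, desired_image):
    # Mirrors the webhook's process_containers(): copy each container whose image
    # differs from the desired one and remember the source -> destination pair.
    replications = {}
    patched = []
    for container in pod_spec.get("containers", []):
        image = container.get("image", "")
        target = desired_image(image)
        if image != target:
            c = deepcopy(container)
            c["image"] = target
            patched.append(c)
            replications[image] = target
    return patched, replications

# Illustrative mapping: prefix public images with a private registry.
mirror = lambda img: img if img.startswith("registry.example/") else "registry.example/" + img
spec = {"containers": [{"name": "app", "image": "nginx:1.21"}]}
print(rewrite_images(spec, mirror))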
f724699755ba91bea524160dc912b306d1e17208
40
py
Python
boa3_test/example/tuple_test/BoolTuple.py
jplippi/neo3-boa
052be4adebb665113715bb80067d954f7ad85ad5
[ "Apache-2.0" ]
25
2020-07-22T19:37:43.000Z
2022-03-08T03:23:55.000Z
boa3_test/example/tuple_test/BoolTuple.py
jplippi/neo3-boa
052be4adebb665113715bb80067d954f7ad85ad5
[ "Apache-2.0" ]
419
2020-04-23T17:48:14.000Z
2022-03-31T13:17:45.000Z
boa3_test/example/tuple_test/BoolTuple.py
jplippi/neo3-boa
052be4adebb665113715bb80067d954f7ad85ad5
[ "Apache-2.0" ]
15
2020-05-21T21:54:24.000Z
2021-11-18T06:17:24.000Z
def Main(): a = (True, True, False)
13.333333
27
0.525
def Main(): a = (True, True, False)
true
true
f72469d5195720193ad83e59ae342ad8847b12fa
11,453
py
Python
planner_svg_gen.py
jc0a20/myplannergen
b1a19733fb393bae6159f12cfb1d239bb81d0c25
[ "MIT" ]
null
null
null
planner_svg_gen.py
jc0a20/myplannergen
b1a19733fb393bae6159f12cfb1d239bb81d0c25
[ "MIT" ]
2
2020-05-01T13:54:12.000Z
2020-05-01T13:55:24.000Z
planner_svg_gen.py
jc0a20/myplannergen
b1a19733fb393bae6159f12cfb1d239bb81d0c25
[ "MIT" ]
null
null
null
import configparser import os import re import subprocess import sys import PyPDF2 config = configparser.ConfigParser() config.read('config.ini', encoding='utf-8') INKSCAPE_PATH = config['DEFAULT']['InkscapePath'] def replace_text(target_doc, target_str, replace_str, id_str): pattern = '''id="''' + id_str + '''".+?>''' + target_str + '''<.+?</text>''' result = re.search(pattern, target_doc, re.S) id_trim_str = target_doc[result.span()[0]: result.span()[1]] pattern2 = ">" + target_str + "<" result2 = re.search(pattern2, id_trim_str, re.S) index_s, index_e = result.span()[0] + result2.span()[0] + 1, result.span()[0] + result2.span()[1] - 1 target_doc_new = target_doc[:index_s] + replace_str + target_doc[index_e:] return target_doc_new def replace_color(target_doc, replace_rgb, id_str): pattern = '''<rect.*?/>''' result = re.findall(pattern, target_doc, re.S) result_trim = "" for i in result: if id_str in i: result_trim = i result2 = re.search(result_trim, target_doc, re.S) tmp_trim_txt = target_doc[result2.span()[0]:result2.span()[1]] pattern2 = '''fill:#''' result3 = re.search(pattern2, tmp_trim_txt, re.S) index_s, index_e = result2.span()[0] + result3.span()[1], result2.span()[0] + result3.span()[1] + 6 target_doc_new = target_doc[:index_s] + replace_rgb + target_doc[index_e:] return target_doc_new def replace_blank(target_doc, replace_alpha, id_str): pattern = '''<rect.*?/>''' result = re.findall(pattern, target_doc, re.S) result_trim = "" for i in result: if id_str in i: result_trim = i result2 = re.search(result_trim, target_doc, re.S) tmp_trim_txt = target_doc[result2.span()[0]:result2.span()[1]] pattern2 = '''fill-opacity:''' result3 = re.search(pattern2, tmp_trim_txt, re.S) index_s, index_e = result2.span()[0] + result3.span()[1], result2.span()[0] + result3.span()[1] + 3 # print(target_doc[index_s:index_e]) target_doc_new = target_doc[:index_s] + str(replace_alpha) + target_doc[index_e:] # print(target_doc_new[index_s:index_s+10]) return target_doc_new day_ref_set = set([i for i in range(1,31+1,1)]) ''' 年、月、日、曜日、何の日、上書き色、何週目 2019,01,01,MON,New Year's Day,#ffb6c1,1 2019,01,02,TUE,2nd,#add8e6,1 2019,01,03,WED,3rd,,2 ''' WEEK_NUM_COLOR = ["#000000", "#8b4513", "#cd5c5c", "#ff8c00", "#ffd700", "#006400", "#4169e1", "#9400d3", "#808080"] # read_list_a_month=[[2020,1,1,"MON","New Year's Day","#ffb6c1",1], # [2020,1,2,"TUE","2nd","#add8e6",1], # [2020,1,3,"WED","3rd","",2]] with open('contents.csv', encoding='utf-8') as f: read_tmp = f.read() read_list = [i.split(',') for i in read_tmp.split('\n') if len(i)>0] read_list = read_list[1:] with open("template.svg", encoding='utf-8') as f: target_doc = f.read() with open("template_cutline.svg", encoding='utf-8') as f: target_doc_cutline = f.read() export_dir_str = "./export_svg/" # MONTH_HEADER,01 # YEAR_HEADER,2020 # DAY_01L-31L,33 # DOW_01L-31L,Sun # DAY_WNUM_01L-31L,01 # DAY_OF_NAME_01-31,DAY_OF_NAME # day_rect_01L # DAY_01R-31L,33 # DOW_01R-31L,Sun # day_rect_01R #["p1o", "p2o", "p3o", "p4o", "p5o", "p6o", "p7o", "p1u", "p2u", "p3u", "p4u", "p5u", "p6u", "p7u"] write_svg_filename_list = [] for iii in zip([1, 2, 3, 4, 5, 6, 7, -1, 0, 12, 11, 10, 9, 8], [0, 12, 11, 10, 9, 8, 7, -1, 1, 2, 3, 4, 5, 6], ["002", "004", "006", "008", "010", "012", "014", "001", "003", "005", "007", "009", "011", "013"]): # 断ち切り線ページ # if iii[0] == -1 and iii[1] == -1: target_doc_new = target_doc_cutline write_svg_filename = export_dir_str + iii[2] + ".svg" write_svg_filename_list.append(write_svg_filename) with open(write_svg_filename, mode='w', encoding='utf-8') 
as f: f.write(target_doc_new) continue else: target_doc_new = target_doc # 左ページ # left_month = iii[0] if left_month == 0: id_str, replace_alpha = "RECT_BLANK_L", 1 target_doc_new = replace_blank(target_doc_new, replace_alpha, id_str) else: id_str, replace_alpha = "RECT_BLANK_L", 0 target_doc_new = replace_blank(target_doc_new, replace_alpha, id_str) left_month = str(left_month) read_list_a_month = [rline for rline in read_list if rline[1] == left_month] READ_YEAR = read_list_a_month[0][0] READ_MONTH = read_list_a_month[0][1] # 年月 year_str = str(READ_YEAR).rjust(4, '0') id_str, target_str, replace_str = "YEAR_HEADER", "2020", year_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) # target_doc month_str = str(READ_MONTH).rjust(2, '0') id_str, target_str, replace_str = "MONTH_HEADER", "01", month_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) # 日の色(交互) 初期化 for i in range(1, 31, 1): day_str = str(i).rjust(2, '0') if i % 2 == 0: # 偶数 id_str, replace_rgb = "day_rect_" + day_str + "L", "FFFFFF" target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) else: # 奇数 id_str, replace_rgb = "day_rect_" + day_str + "L", "FFFFFF" target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) for yi, mi, di, dowi, dnamei, dcolori, wnumi, tmp1, tmp2, tmp3 in read_list_a_month: day_str = str(di).rjust(2, '0') id_str, target_str, replace_str = "DAY_" + day_str + "L", "33", day_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) day_of_week_str = dowi id_str, target_str, replace_str = "DOW_" + day_str + "L", "Sun", day_of_week_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) day_of_name_str = dnamei id_str, target_str, replace_str = "DAY_OF_NAME_" + day_str, "DAY_OF_NAME", day_of_name_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) week_num_str = wnumi id_str, target_str, replace_str = "DAY_WNUM_" + day_str + "L", "01", week_num_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) if len(dcolori) == 6: id_str, replace_rgb = "day_rect_" + day_str + "L", dcolori target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) # その月に存在しない日の処理 day_list = [int(di) for yi, mi, di, dowi, dnamei, dcolori, wnumi, tmp1, tmp2, tmp3 in read_list_a_month] day_list_diff = sorted(list(day_ref_set - set(day_list))) # その月に存在しない日のリスト for ddiff in day_list_diff: day_str = str(ddiff).rjust(2, '0') id_str, target_str, replace_str = "DAY_" + day_str + "L", "33", "" target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) day_of_week_str = '' id_str, target_str, replace_str = "DOW_" + day_str + "L", "Sun", day_of_week_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) day_of_name_str = '' id_str, target_str, replace_str = "DAY_OF_NAME_" + day_str, "DAY_OF_NAME", day_of_name_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) week_num_str = "" id_str, target_str, replace_str = "DAY_WNUM_" + day_str + "L", "01", week_num_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) id_str, replace_rgb = "day_rect_" + day_str + "L", "CCCCCC" target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) # 右ページ # right_month = iii[1] if right_month == 0: id_str, replace_alpha = "RECT_BLANK_R", 1 target_doc_new = replace_blank(target_doc_new, replace_alpha, id_str) else: id_str, replace_alpha = "RECT_BLANK_R", 0 
target_doc_new = replace_blank(target_doc_new, replace_alpha, id_str) right_month = str(right_month) read_list_a_month = [rline for rline in read_list if rline[1] == right_month] READ_YEAR = read_list_a_month[0][0] READ_MONTH = read_list_a_month[0][1] # 日の色(交互) 初期化 for i in range(1, 31, 1): day_str = str(i).rjust(2, '0') if i % 2 == 0: # 偶数 id_str, replace_rgb = "day_rect_" + day_str + "R", "FFFFFF" target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) # target_doc_new else: # 奇数 id_str, replace_rgb = "day_rect_" + day_str + "R", "FFFFFF" target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) for yi, mi, di, dowi, dnamei, dcolori, wnumi, tmp1, tmp2, tmp3 in read_list_a_month: day_str = str(di).rjust(2, '0') id_str, target_str, replace_str = "DAY_" + day_str + "R", "33", day_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) day_of_week_str = dowi id_str, target_str, replace_str = "DOW_" + day_str + "R", "Sun", day_of_week_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) if len(dcolori) == 6: id_str, replace_rgb = "day_rect_" + day_str + "R", dcolori target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) # その月に存在しない日の処理 day_list = [int(di) for yi, mi, di, dowi, dnamei, dcolori, wnumi, tmp1, tmp2, tmp3 in read_list_a_month] day_list_diff = sorted(list(day_ref_set - set(day_list))) # その月に存在しない日のリスト for ddiff in day_list_diff: day_str = str(ddiff).rjust(2, '0') id_str, target_str, replace_str = "DAY_" + day_str + "R", "33", "" target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) day_of_week_str = '' id_str, target_str, replace_str = "DOW_" + day_str + "R", "Sun", day_of_week_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) id_str, replace_rgb = "day_rect_" + day_str + "R", "CCCCCC" target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) write_svg_filename = export_dir_str + iii[2] + ".svg" write_svg_filename_list.append(write_svg_filename) with open(write_svg_filename, mode='w', encoding='utf-8') as f: f.write(target_doc_new) write_pdf_filename_list = [] for fnamei in sorted(write_svg_filename_list): basename_without_ext = os.path.splitext(os.path.basename(fnamei))[0] export_pdf_filename = ".\\export_pdf\\{0}.pdf".format(basename_without_ext) write_pdf_filename_list.append(export_pdf_filename) print(fnamei,'->',export_pdf_filename) cmd = '''"{0}" -f {1} -A {2}'''.format(INKSCAPE_PATH,fnamei,export_pdf_filename) e = subprocess.call(cmd, shell=True) merger = PyPDF2.PdfFileMerger() for a_pdf_filename in write_pdf_filename_list: merger.append(a_pdf_filename) merger.write('./output.pdf') merger.close() sys.exit(0)
38.56229
115
0.628045
import configparser import os import re import subprocess import sys import PyPDF2 config = configparser.ConfigParser() config.read('config.ini', encoding='utf-8') INKSCAPE_PATH = config['DEFAULT']['InkscapePath'] def replace_text(target_doc, target_str, replace_str, id_str): pattern = '''id="''' + id_str + '''".+?>''' + target_str + '''<.+?</text>''' result = re.search(pattern, target_doc, re.S) id_trim_str = target_doc[result.span()[0]: result.span()[1]] pattern2 = ">" + target_str + "<" result2 = re.search(pattern2, id_trim_str, re.S) index_s, index_e = result.span()[0] + result2.span()[0] + 1, result.span()[0] + result2.span()[1] - 1 target_doc_new = target_doc[:index_s] + replace_str + target_doc[index_e:] return target_doc_new def replace_color(target_doc, replace_rgb, id_str): pattern = '''<rect.*?/>''' result = re.findall(pattern, target_doc, re.S) result_trim = "" for i in result: if id_str in i: result_trim = i result2 = re.search(result_trim, target_doc, re.S) tmp_trim_txt = target_doc[result2.span()[0]:result2.span()[1]] pattern2 = '''fill:#''' result3 = re.search(pattern2, tmp_trim_txt, re.S) index_s, index_e = result2.span()[0] + result3.span()[1], result2.span()[0] + result3.span()[1] + 6 target_doc_new = target_doc[:index_s] + replace_rgb + target_doc[index_e:] return target_doc_new def replace_blank(target_doc, replace_alpha, id_str): pattern = '''<rect.*?/>''' result = re.findall(pattern, target_doc, re.S) result_trim = "" for i in result: if id_str in i: result_trim = i result2 = re.search(result_trim, target_doc, re.S) tmp_trim_txt = target_doc[result2.span()[0]:result2.span()[1]] pattern2 = '''fill-opacity:''' result3 = re.search(pattern2, tmp_trim_txt, re.S) index_s, index_e = result2.span()[0] + result3.span()[1], result2.span()[0] + result3.span()[1] + 3 target_doc_new = target_doc[:index_s] + str(replace_alpha) + target_doc[index_e:] return target_doc_new day_ref_set = set([i for i in range(1,31+1,1)]) WEEK_NUM_COLOR = ["#000000", "#8b4513", "#cd5c5c", "#ff8c00", "#ffd700", "#006400", "#4169e1", "#9400d3", "#808080"] # [2020,1,2,"TUE","2nd","#add8e6",1], # [2020,1,3,"WED","3rd","",2]] with open('contents.csv', encoding='utf-8') as f: read_tmp = f.read() read_list = [i.split(',') for i in read_tmp.split('\n') if len(i)>0] read_list = read_list[1:] with open("template.svg", encoding='utf-8') as f: target_doc = f.read() with open("template_cutline.svg", encoding='utf-8') as f: target_doc_cutline = f.read() export_dir_str = "./export_svg/" # MONTH_HEADER,01 # YEAR_HEADER,2020 # DAY_01L-31L,33 # DOW_01L-31L,Sun # DAY_WNUM_01L-31L,01 # DAY_OF_NAME_01-31,DAY_OF_NAME # day_rect_01L # DAY_01R-31L,33 # DOW_01R-31L,Sun # day_rect_01R #["p1o", "p2o", "p3o", "p4o", "p5o", "p6o", "p7o", "p1u", "p2u", "p3u", "p4u", "p5u", "p6u", "p7u"] write_svg_filename_list = [] for iii in zip([1, 2, 3, 4, 5, 6, 7, -1, 0, 12, 11, 10, 9, 8], [0, 12, 11, 10, 9, 8, 7, -1, 1, 2, 3, 4, 5, 6], ["002", "004", "006", "008", "010", "012", "014", "001", "003", "005", "007", "009", "011", "013"]): # 断ち切り線ページ # if iii[0] == -1 and iii[1] == -1: target_doc_new = target_doc_cutline write_svg_filename = export_dir_str + iii[2] + ".svg" write_svg_filename_list.append(write_svg_filename) with open(write_svg_filename, mode='w', encoding='utf-8') as f: f.write(target_doc_new) continue else: target_doc_new = target_doc # 左ページ # left_month = iii[0] if left_month == 0: id_str, replace_alpha = "RECT_BLANK_L", 1 target_doc_new = replace_blank(target_doc_new, replace_alpha, id_str) else: id_str, replace_alpha = 
"RECT_BLANK_L", 0 target_doc_new = replace_blank(target_doc_new, replace_alpha, id_str) left_month = str(left_month) read_list_a_month = [rline for rline in read_list if rline[1] == left_month] READ_YEAR = read_list_a_month[0][0] READ_MONTH = read_list_a_month[0][1] # 年月 year_str = str(READ_YEAR).rjust(4, '0') id_str, target_str, replace_str = "YEAR_HEADER", "2020", year_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) # target_doc month_str = str(READ_MONTH).rjust(2, '0') id_str, target_str, replace_str = "MONTH_HEADER", "01", month_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) # 日の色(交互) 初期化 for i in range(1, 31, 1): day_str = str(i).rjust(2, '0') if i % 2 == 0: # 偶数 id_str, replace_rgb = "day_rect_" + day_str + "L", "FFFFFF" target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) else: # 奇数 id_str, replace_rgb = "day_rect_" + day_str + "L", "FFFFFF" target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) for yi, mi, di, dowi, dnamei, dcolori, wnumi, tmp1, tmp2, tmp3 in read_list_a_month: day_str = str(di).rjust(2, '0') id_str, target_str, replace_str = "DAY_" + day_str + "L", "33", day_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) day_of_week_str = dowi id_str, target_str, replace_str = "DOW_" + day_str + "L", "Sun", day_of_week_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) day_of_name_str = dnamei id_str, target_str, replace_str = "DAY_OF_NAME_" + day_str, "DAY_OF_NAME", day_of_name_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) week_num_str = wnumi id_str, target_str, replace_str = "DAY_WNUM_" + day_str + "L", "01", week_num_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) if len(dcolori) == 6: id_str, replace_rgb = "day_rect_" + day_str + "L", dcolori target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) # その月に存在しない日の処理 day_list = [int(di) for yi, mi, di, dowi, dnamei, dcolori, wnumi, tmp1, tmp2, tmp3 in read_list_a_month] day_list_diff = sorted(list(day_ref_set - set(day_list))) # その月に存在しない日のリスト for ddiff in day_list_diff: day_str = str(ddiff).rjust(2, '0') id_str, target_str, replace_str = "DAY_" + day_str + "L", "33", "" target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) day_of_week_str = '' id_str, target_str, replace_str = "DOW_" + day_str + "L", "Sun", day_of_week_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) day_of_name_str = '' id_str, target_str, replace_str = "DAY_OF_NAME_" + day_str, "DAY_OF_NAME", day_of_name_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) week_num_str = "" id_str, target_str, replace_str = "DAY_WNUM_" + day_str + "L", "01", week_num_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) id_str, replace_rgb = "day_rect_" + day_str + "L", "CCCCCC" target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) # 右ページ # right_month = iii[1] if right_month == 0: id_str, replace_alpha = "RECT_BLANK_R", 1 target_doc_new = replace_blank(target_doc_new, replace_alpha, id_str) else: id_str, replace_alpha = "RECT_BLANK_R", 0 target_doc_new = replace_blank(target_doc_new, replace_alpha, id_str) right_month = str(right_month) read_list_a_month = [rline for rline in read_list if rline[1] == right_month] READ_YEAR = read_list_a_month[0][0] READ_MONTH = read_list_a_month[0][1] # 日の色(交互) 初期化 
for i in range(1, 31, 1): day_str = str(i).rjust(2, '0') if i % 2 == 0: # 偶数 id_str, replace_rgb = "day_rect_" + day_str + "R", "FFFFFF" target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) # target_doc_new else: # 奇数 id_str, replace_rgb = "day_rect_" + day_str + "R", "FFFFFF" target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) for yi, mi, di, dowi, dnamei, dcolori, wnumi, tmp1, tmp2, tmp3 in read_list_a_month: day_str = str(di).rjust(2, '0') id_str, target_str, replace_str = "DAY_" + day_str + "R", "33", day_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) day_of_week_str = dowi id_str, target_str, replace_str = "DOW_" + day_str + "R", "Sun", day_of_week_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) if len(dcolori) == 6: id_str, replace_rgb = "day_rect_" + day_str + "R", dcolori target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) # その月に存在しない日の処理 day_list = [int(di) for yi, mi, di, dowi, dnamei, dcolori, wnumi, tmp1, tmp2, tmp3 in read_list_a_month] day_list_diff = sorted(list(day_ref_set - set(day_list))) # その月に存在しない日のリスト for ddiff in day_list_diff: day_str = str(ddiff).rjust(2, '0') id_str, target_str, replace_str = "DAY_" + day_str + "R", "33", "" target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) day_of_week_str = '' id_str, target_str, replace_str = "DOW_" + day_str + "R", "Sun", day_of_week_str target_doc_new = replace_text(target_doc_new, target_str, replace_str, id_str) id_str, replace_rgb = "day_rect_" + day_str + "R", "CCCCCC" target_doc_new = replace_color(target_doc_new, replace_rgb, id_str) write_svg_filename = export_dir_str + iii[2] + ".svg" write_svg_filename_list.append(write_svg_filename) with open(write_svg_filename, mode='w', encoding='utf-8') as f: f.write(target_doc_new) write_pdf_filename_list = [] for fnamei in sorted(write_svg_filename_list): basename_without_ext = os.path.splitext(os.path.basename(fnamei))[0] export_pdf_filename = ".\\export_pdf\\{0}.pdf".format(basename_without_ext) write_pdf_filename_list.append(export_pdf_filename) print(fnamei,'->',export_pdf_filename) cmd = '''"{0}" -f {1} -A {2}'''.format(INKSCAPE_PATH,fnamei,export_pdf_filename) e = subprocess.call(cmd, shell=True) merger = PyPDF2.PdfFileMerger() for a_pdf_filename in write_pdf_filename_list: merger.append(a_pdf_filename) merger.write('./output.pdf') merger.close() sys.exit(0)
true
true
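The planner generator above edits SVG templates purely with regular expressions keyed on element ids. A minimal usage sketch of replace_text() from the record; the SVG snippet is illustrative and assumes the placeholder text sits inside a <tspan>, as Inkscape-produced templates normally do:

svg = '<text id="YEAR_HEADER" x="10" y="20"><tspan>2020</tspan></text>'
print(replace_text(svg, "2020", "2021", "YEAR_HEADER"))
# -> <text id="YEAR_HEADER" x="10" y="20"><tspan>2021</tspan></text>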
f7246ae56a8cd487abd73fdf982a117465640f8f
6,658
py
Python
pciids/pciids.py
ilkermanap/python-pciids
9a2fcb00d3e0100b9de331047133a42e98242deb
[ "MIT" ]
5
2018-02-12T13:45:45.000Z
2020-06-04T09:24:52.000Z
pciids/pciids.py
ilkermanap/python-pciids
9a2fcb00d3e0100b9de331047133a42e98242deb
[ "MIT" ]
2
2016-03-17T09:22:01.000Z
2020-01-12T23:04:35.000Z
pciids/pciids.py
ilkermanap/python-pciids
9a2fcb00d3e0100b9de331047133a42e98242deb
[ "MIT" ]
4
2018-09-04T12:57:58.000Z
2021-07-02T01:01:26.000Z
import os import bz2 import requests import glob global HOME HOME = "https://pci-ids.ucw.cz" class Vendor: """ Class for vendors. This is the top level class for the devices belong to a specific vendor. self.devices is the device dictionary subdevices are in each device. """ def __init__(self, vendorStr): """ Class initializes with the raw line from pci.ids Parsing takes place inside __init__ """ self.ID = vendorStr.split()[0] self.name = vendorStr.replace("%s " % self.ID,"") self.devices = {} def addDevice(self, deviceStr): """ Adds a device to self.devices takes the raw line from pci.ids """ s = deviceStr.strip() devID = s.split()[0] if devID in self.devices: pass else: self.devices[devID] = Device(deviceStr) def report(self): print( self.ID, self.name) for id, dev in self.devices.items(): dev.report() class Device: def __init__(self, deviceStr): """ Class for each device. Each vendor has its own devices dictionary. """ s = deviceStr.strip() self.ID = s.split()[0] self.name = s.replace("%s " % self.ID,"") self.subdevices = {} def report(self): print("\t%s\t%s" % (self.ID, self.name)) for subID, subdev in self.subdevices.items(): subdev.report() def addSubDevice(self, subDeviceStr): """ Adds a subvendor, subdevice to device. Uses raw line from pci.ids """ s = subDeviceStr.strip() spl = s.split() subVendorID = spl[0] subDeviceID = spl[1] subDeviceName = s.split(" ")[-1] devID = "%s:%s" % (subVendorID,subDeviceID) self.subdevices[devID] = SubDevice(subVendorID,subDeviceID,subDeviceName) class SubDevice: """ Class for subdevices. """ def __init__(self, vendor, device, name): """ Class initializes with vendorid, deviceid and name """ self.vendorID = vendor self.deviceID = device self.name = name def report(self): print( "\t\t%s\t%s\t%s" % (self.vendorID, self.deviceID,self.name)) class PCIIds: """ Top class for all pci.ids entries. All queries will be asked to this class. PCIIds.vendors["0e11"].devices["0046"].subdevices["0e11:4091"].name = "Smart Array 6i" """ def __init__(self, url=HOME): """ Prepares the directories. Checks local data file. 
Tries to load from local, if not found, downloads from web """ self.url = url self.version = "" self.date = "" self.compressed = "pci.ids.bz2" if (os.path.isdir("data") is False): os.mkdir("data") self.vendors = {} self.contents = None self.loadLocal() self.parse() def reportVendors(self): """Reports the vendors """ for vid, v in self.vendors.items(): print( v.ID, v.name) def report(self, vendor = None): """ Reports everything for all vendors or a specific vendor PCIIds.report() reports everything PCIIDs.report("0e11") reports only "Compaq Computer Corporation" """ if vendor != None: self.vendors[vendor].report() else: for vID, v in self.vendors.items(): v.report() def findDate(self, content): for l in content: if l.find("Date:") > -1: return l.split()[-2].replace("-", "") return None def parse(self): if len(self.contents) < 1: print( "data/%s-pci.ids not found" % self.date) else: vendorID = "" deviceID = "" for l in self.contents: if l[0] == "#": continue elif len(l.strip()) == 0: continue else: if l.find("\t\t") == 0: self.vendors[vendorID].devices[deviceID].addSubDevice(l) elif l.find("\t") == 0: deviceID = l.strip().split()[0] self.vendors[vendorID].addDevice(l) else: vendorID = l.split()[0] self.vendors[vendorID] = Vendor(l) def getLatest(self): ver, date, url = self.latestVersion() outfile = "data/%s-%s" % (date, self.compressed[:-4]) # remove bz2 out = open(outfile, "wb") resp = requests.get(url) out.write(bz2.decompress(resp.content)) out.close() self.version = ver self.date = date self.readLocal() def readLocal(self): """ Reads the local file """ self.contents = open("data/%s-pci.ids" % self.date).readlines() self.date = self.findDate(self.contents) def loadLocal(self): """ Loads database from local. If there is no file, it creates a new one from web """ idsfile = glob.glob("data/*.ids") if len(idsfile) == 0: self.getLatest() else: self.date = idsfile[0].split("/")[1].split("-")[0] self.readLocal() def latestVersion(self): """ Checks the latest version from web """ resp = requests.get(self.url) webPage = resp.content.decode().splitlines() for line in webPage: if line.find(self.compressed) > -1: print(line) for tag in line.split("<"): if tag.find(self.compressed) > -1: path = tag.split('"')[1] ver = path.split("/")[1] url = "%s%s" % (self.url, path) urlUncompressed = url.replace(".bz2","") resp2 = requests.get(urlUncompressed) con = resp2.content.decode().splitlines() for i in range(10): l = con[i] if l.find("Date:") > -1: date = l.split()[-2].replace("-","") break return (ver, date, "%s%s" % (HOME, path)) break return "" if __name__ == "__main__": id = PCIIds() #id.reportVendors()
30.541284
92
0.49249
import os import bz2 import requests import glob global HOME HOME = "https://pci-ids.ucw.cz" class Vendor: def __init__(self, vendorStr): self.ID = vendorStr.split()[0] self.name = vendorStr.replace("%s " % self.ID,"") self.devices = {} def addDevice(self, deviceStr): s = deviceStr.strip() devID = s.split()[0] if devID in self.devices: pass else: self.devices[devID] = Device(deviceStr) def report(self): print( self.ID, self.name) for id, dev in self.devices.items(): dev.report() class Device: def __init__(self, deviceStr): s = deviceStr.strip() self.ID = s.split()[0] self.name = s.replace("%s " % self.ID,"") self.subdevices = {} def report(self): print("\t%s\t%s" % (self.ID, self.name)) for subID, subdev in self.subdevices.items(): subdev.report() def addSubDevice(self, subDeviceStr): s = subDeviceStr.strip() spl = s.split() subVendorID = spl[0] subDeviceID = spl[1] subDeviceName = s.split(" ")[-1] devID = "%s:%s" % (subVendorID,subDeviceID) self.subdevices[devID] = SubDevice(subVendorID,subDeviceID,subDeviceName) class SubDevice: def __init__(self, vendor, device, name): self.vendorID = vendor self.deviceID = device self.name = name def report(self): print( "\t\t%s\t%s\t%s" % (self.vendorID, self.deviceID,self.name)) class PCIIds: def __init__(self, url=HOME): self.url = url self.version = "" self.date = "" self.compressed = "pci.ids.bz2" if (os.path.isdir("data") is False): os.mkdir("data") self.vendors = {} self.contents = None self.loadLocal() self.parse() def reportVendors(self): for vid, v in self.vendors.items(): print( v.ID, v.name) def report(self, vendor = None): if vendor != None: self.vendors[vendor].report() else: for vID, v in self.vendors.items(): v.report() def findDate(self, content): for l in content: if l.find("Date:") > -1: return l.split()[-2].replace("-", "") return None def parse(self): if len(self.contents) < 1: print( "data/%s-pci.ids not found" % self.date) else: vendorID = "" deviceID = "" for l in self.contents: if l[0] == "#": continue elif len(l.strip()) == 0: continue else: if l.find("\t\t") == 0: self.vendors[vendorID].devices[deviceID].addSubDevice(l) elif l.find("\t") == 0: deviceID = l.strip().split()[0] self.vendors[vendorID].addDevice(l) else: vendorID = l.split()[0] self.vendors[vendorID] = Vendor(l) def getLatest(self): ver, date, url = self.latestVersion() outfile = "data/%s-%s" % (date, self.compressed[:-4]) out = open(outfile, "wb") resp = requests.get(url) out.write(bz2.decompress(resp.content)) out.close() self.version = ver self.date = date self.readLocal() def readLocal(self): self.contents = open("data/%s-pci.ids" % self.date).readlines() self.date = self.findDate(self.contents) def loadLocal(self): idsfile = glob.glob("data/*.ids") if len(idsfile) == 0: self.getLatest() else: self.date = idsfile[0].split("/")[1].split("-")[0] self.readLocal() def latestVersion(self): resp = requests.get(self.url) webPage = resp.content.decode().splitlines() for line in webPage: if line.find(self.compressed) > -1: print(line) for tag in line.split("<"): if tag.find(self.compressed) > -1: path = tag.split('"')[1] ver = path.split("/")[1] url = "%s%s" % (self.url, path) urlUncompressed = url.replace(".bz2","") resp2 = requests.get(urlUncompressed) con = resp2.content.decode().splitlines() for i in range(10): l = con[i] if l.find("Date:") > -1: date = l.split()[-2].replace("-","") break return (ver, date, "%s%s" % (HOME, path)) break return "" if __name__ == "__main__": id = PCIIds() #id.reportVendors()
true
true
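The pci.ids parser above distinguishes vendors, devices and subdevices purely by leading tabs (none, one, two). A tiny hand-fed sketch using the record's classes; the vendor id and name come from the docstring in the record, the device name is illustrative:

# Vendor lines are unindented, device lines start with one tab, as in pci.ids.
v = Vendor("0e11  Compaq Computer Corporation")
v.addDevice("\t0046  Smart Array controller (illustrative name)")
v.report()   # prints the vendor id/name and the device registered under it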
f7246b1f729e630e5873f854feff0a703ae952c5
3,564
py
Python
test/functional/feature_notifications.py
cisnes/PINECOIN
a0252cace17ecc1208a07368c0b893d3878459d8
[ "MIT" ]
null
null
null
test/functional/feature_notifications.py
cisnes/PINECOIN
a0252cace17ecc1208a07368c0b893d3878459d8
[ "MIT" ]
null
null
null
test/functional/feature_notifications.py
cisnes/PINECOIN
a0252cace17ecc1208a07368c0b893d3878459d8
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the -alertnotify, -blocknotify and -walletnotify options.""" import os from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE from test_framework.test_framework import PineCoinTestFramework from test_framework.util import assert_equal, wait_until, connect_nodes_bi class NotificationsTest(PineCoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def setup_network(self): self.alertnotify_dir = os.path.join(self.options.tmpdir, "alertnotify") self.blocknotify_dir = os.path.join(self.options.tmpdir, "blocknotify") self.walletnotify_dir = os.path.join(self.options.tmpdir, "walletnotify") os.mkdir(self.alertnotify_dir) os.mkdir(self.blocknotify_dir) os.mkdir(self.walletnotify_dir) # -alertnotify and -blocknotify on node0, walletnotify on node1 self.extra_args = [[ "-alertnotify=echo > {}".format(os.path.join(self.alertnotify_dir, '%s')), "-blocknotify=echo > {}".format(os.path.join(self.blocknotify_dir, '%s'))], ["-blockversion=211", "-rescan", "-walletnotify=echo > {}".format(os.path.join(self.walletnotify_dir, '%s'))]] super().setup_network() def run_test(self): self.log.info("test -blocknotify") block_count = 10 blocks = self.nodes[1].generatetoaddress(block_count, self.nodes[1].getnewaddress() if self.is_wallet_compiled() else ADDRESS_BCRT1_UNSPENDABLE) # wait at most 10 seconds for expected number of files before reading the content wait_until(lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10) # directory content should equal the generated blocks hashes assert_equal(sorted(blocks), sorted(os.listdir(self.blocknotify_dir))) if self.is_wallet_compiled(): self.log.info("test -walletnotify") # wait at most 10 seconds for expected number of files before reading the content wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10) # directory content should equal the generated transaction hashes txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir))) self.stop_node(1) for tx_file in os.listdir(self.walletnotify_dir): os.remove(os.path.join(self.walletnotify_dir, tx_file)) self.log.info("test -walletnotify after rescan") # restart node to rescan to force wallet notifications self.start_node(1) connect_nodes_bi(self.nodes, 0, 1) wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10) # directory content should equal the generated transaction hashes txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir))) # TODO: add test for `-alertnotify` large fork notifications if __name__ == '__main__': NotificationsTest().main()
48.821918
152
0.670034
import os from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE from test_framework.test_framework import PineCoinTestFramework from test_framework.util import assert_equal, wait_until, connect_nodes_bi class NotificationsTest(PineCoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def setup_network(self): self.alertnotify_dir = os.path.join(self.options.tmpdir, "alertnotify") self.blocknotify_dir = os.path.join(self.options.tmpdir, "blocknotify") self.walletnotify_dir = os.path.join(self.options.tmpdir, "walletnotify") os.mkdir(self.alertnotify_dir) os.mkdir(self.blocknotify_dir) os.mkdir(self.walletnotify_dir) self.extra_args = [[ "-alertnotify=echo > {}".format(os.path.join(self.alertnotify_dir, '%s')), "-blocknotify=echo > {}".format(os.path.join(self.blocknotify_dir, '%s'))], ["-blockversion=211", "-rescan", "-walletnotify=echo > {}".format(os.path.join(self.walletnotify_dir, '%s'))]] super().setup_network() def run_test(self): self.log.info("test -blocknotify") block_count = 10 blocks = self.nodes[1].generatetoaddress(block_count, self.nodes[1].getnewaddress() if self.is_wallet_compiled() else ADDRESS_BCRT1_UNSPENDABLE) wait_until(lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10) assert_equal(sorted(blocks), sorted(os.listdir(self.blocknotify_dir))) if self.is_wallet_compiled(): self.log.info("test -walletnotify") wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10) txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir))) self.stop_node(1) for tx_file in os.listdir(self.walletnotify_dir): os.remove(os.path.join(self.walletnotify_dir, tx_file)) self.log.info("test -walletnotify after rescan") self.start_node(1) connect_nodes_bi(self.nodes, 0, 1) wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10) txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir))) if __name__ == '__main__': NotificationsTest().main()
true
true
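The notification test above relies on polling a directory until the expected number of notify files shows up. A simplified stand-in for that wait_until pattern (not the framework's actual implementation):

import os
import time

def wait_until(predicate, timeout=10, poll=0.5):
    # Poll until the predicate holds or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(poll)
    raise AssertionError("condition not met within %s seconds" % timeout)

# Usage mirroring the test (directory path is illustrative):
# wait_until(lambda: len(os.listdir("blocknotify")) == 10, timeout=10)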
f7246b36becdc63ea725194e371727a55e6be4c1
3,932
py
Python
test/functional/test_framework/blocktools.py
tmiholdings/tmi
f1b6027f025dafc40616cde076df2f4b8cdae8a2
[ "MIT" ]
null
null
null
test/functional/test_framework/blocktools.py
tmiholdings/tmi
f1b6027f025dafc40616cde076df2f4b8cdae8a2
[ "MIT" ]
null
null
null
test/functional/test_framework/blocktools.py
tmiholdings/tmi
f1b6027f025dafc40616cde076df2f4b8cdae8a2
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The TMIcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""

from .mininode import *
from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN

# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
    block = CBlock()
    if nTime is None:
        import time
        block.nTime = int(time.time()+600)
    else:
        block.nTime = nTime
    block.hashPrevBlock = hashprev
    block.nBits = 0x207fffff # Will break after a difficulty adjustment...
    block.vtx.append(coinbase)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.calc_sha256()
    return block

# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"

def get_witness_script(witness_root, witness_nonce):
    witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(witness_nonce)))
    output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
    return CScript([OP_RETURN, output_data])

# According to BIP141, blocks with witness rules active must commit to the
# hash of all in-block transactions including witness.
def add_witness_commitment(block, nonce=0):
    # First calculate the merkle root of the block's
    # transactions, with witnesses.
    witness_nonce = nonce
    witness_root = block.calc_witness_merkle_root()
    # witness_nonce should go to coinbase witness.
    block.vtx[0].wit.vtxinwit = [CTxInWitness()]
    block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]

    # witness commitment is the last OP_RETURN output in coinbase
    block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
    block.vtx[0].rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()

def serialize_script_num(value):
    r = bytearray(0)
    if value == 0:
        return r
    neg = value < 0
    absvalue = -value if neg else value
    while (absvalue):
        r.append(int(absvalue & 0xff))
        absvalue >>= 8
    if r[-1] & 0x80:
        r.append(0x80 if neg else 0)
    elif neg:
        r[-1] |= 0x80
    return r

# Create a coinbase transaction, assuming no miner fees.
# If pubkey is passed in, the coinbase output will be a P2PK output;
# otherwise an anyone-can-spend output.
def create_coinbase(height, pubkey = None):
    coinbase = CTransaction()
    coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), ser_string(serialize_script_num(height)), 0xffffffff))
    coinbaseoutput = CTxOut()
    coinbaseoutput.nValue = 50 * COIN
    halvings = int(height/150) # regtest
    coinbaseoutput.nValue >>= halvings
    if (pubkey != None):
        coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
    else:
        coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
    coinbase.vout = [ coinbaseoutput ]
    coinbase.calc_sha256()
    return coinbase

# Create a transaction.
# If the scriptPubKey is not specified, make it anyone-can-spend.
def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()):
    tx = CTransaction()
    assert(n < len(prevtx.vout))
    tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
    tx.vout.append(CTxOut(value, scriptPubKey))
    tx.calc_sha256()
    return tx

def get_legacy_sigopcount_block(block, fAccurate=True):
    count = 0
    for tx in block.vtx:
        count += get_legacy_sigopcount_tx(tx, fAccurate)
    return count

def get_legacy_sigopcount_tx(tx, fAccurate=True):
    count = 0
    for i in tx.vout:
        count += i.scriptPubKey.GetSigOpCount(fAccurate)
    for j in tx.vin:
        # scriptSig might be of type bytes, so convert to CScript for the moment
        count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
    return count
35.423423
104
0.709054
from .mininode import *
from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN

def create_block(hashprev, coinbase, nTime=None):
    block = CBlock()
    if nTime is None:
        import time
        block.nTime = int(time.time()+600)
    else:
        block.nTime = nTime
    block.hashPrevBlock = hashprev
    block.nBits = 0x207fffff
    block.vtx.append(coinbase)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.calc_sha256()
    return block

WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"

def get_witness_script(witness_root, witness_nonce):
    witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(witness_nonce)))
    output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
    return CScript([OP_RETURN, output_data])

def add_witness_commitment(block, nonce=0):
    # transactions, with witnesses.
    witness_nonce = nonce
    witness_root = block.calc_witness_merkle_root()
    # witness_nonce should go to coinbase witness.
    block.vtx[0].wit.vtxinwit = [CTxInWitness()]
    block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]

    # witness commitment is the last OP_RETURN output in coinbase
    block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
    block.vtx[0].rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()

def serialize_script_num(value):
    r = bytearray(0)
    if value == 0:
        return r
    neg = value < 0
    absvalue = -value if neg else value
    while (absvalue):
        r.append(int(absvalue & 0xff))
        absvalue >>= 8
    if r[-1] & 0x80:
        r.append(0x80 if neg else 0)
    elif neg:
        r[-1] |= 0x80
    return r

# Create a coinbase transaction, assuming no miner fees.
# If pubkey is passed in, the coinbase output will be a P2PK output;
# otherwise an anyone-can-spend output.
def create_coinbase(height, pubkey = None):
    coinbase = CTransaction()
    coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), ser_string(serialize_script_num(height)), 0xffffffff))
    coinbaseoutput = CTxOut()
    coinbaseoutput.nValue = 50 * COIN
    halvings = int(height/150) # regtest
    coinbaseoutput.nValue >>= halvings
    if (pubkey != None):
        coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
    else:
        coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
    coinbase.vout = [ coinbaseoutput ]
    coinbase.calc_sha256()
    return coinbase

# Create a transaction.
# If the scriptPubKey is not specified, make it anyone-can-spend.
def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()):
    tx = CTransaction()
    assert(n < len(prevtx.vout))
    tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
    tx.vout.append(CTxOut(value, scriptPubKey))
    tx.calc_sha256()
    return tx

def get_legacy_sigopcount_block(block, fAccurate=True):
    count = 0
    for tx in block.vtx:
        count += get_legacy_sigopcount_tx(tx, fAccurate)
    return count

def get_legacy_sigopcount_tx(tx, fAccurate=True):
    count = 0
    for i in tx.vout:
        count += i.scriptPubKey.GetSigOpCount(fAccurate)
    for j in tx.vin:
        # scriptSig might be of type bytes, so convert to CScript for the moment
        count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
    return count
true
true
f7246bf98a05892332d4bb0c595fed3c4643f2dc
2,401
py
Python
src/pudl/__init__.py
erictleung/pudl
32bfbf3a959114f766b630f5b873a93b7a930c71
[ "MIT" ]
null
null
null
src/pudl/__init__.py
erictleung/pudl
32bfbf3a959114f766b630f5b873a93b7a930c71
[ "MIT" ]
null
null
null
src/pudl/__init__.py
erictleung/pudl
32bfbf3a959114f766b630f5b873a93b7a930c71
[ "MIT" ]
null
null
null
"""The Public Utility Data Liberation (PUDL) Project.""" # Create a parent logger for all PUDL loggers to inherit from import logging import pkg_resources import pudl.analysis.mcoe import pudl.cli import pudl.constants import pudl.convert.datapkg_to_sqlite import pudl.convert.epacems_to_parquet import pudl.convert.ferc1_to_sqlite import pudl.convert.flatten_datapkgs import pudl.etl import pudl.extract.eia860 import pudl.extract.eia923 import pudl.extract.epacems import pudl.extract.epaipm import pudl.extract.ferc1 import pudl.glue.ferc1_eia import pudl.helpers import pudl.load.csv import pudl.load.metadata # Output modules by data source: import pudl.output.eia860 import pudl.output.eia923 import pudl.output.ferc1 import pudl.output.glue import pudl.output.pudltabl # Transformation functions, organized by data source: import pudl.transform.eia import pudl.transform.eia860 import pudl.transform.eia923 import pudl.transform.epacems import pudl.transform.epaipm import pudl.transform.ferc1 # Deployed data & workspace management import pudl.validate import pudl.workspace.datastore import pudl.workspace.setup # noqa: F401 WTF is this showing up as unused? __author__ = "Catalyst Cooperative" __contact__ = "pudl@catalyst.coop" __maintainer__ = "Catalyst Cooperative" __license__ = "MIT License" __maintainer_email__ = "zane.selvans@catalyst.coop" __version__ = pkg_resources.get_distribution("catalystcoop.pudl").version __docformat__ = "restructuredtext en" __description__ = "Tools for liberating public US electric utility data." __long_description__ = """ This Public Utility Data Liberation (PUDL) project is a collection of tools that allow programmatic access to and manipulation of many public data sets related to electric utilities in the United States. These data sets are often collected by state and federal agencies, but are publicized in ways that are not well standardized, or intended for interoperability. PUDL seeks to allow more transparent and useful access to this important public data, with the goal of enabling climate advocates, academic researchers, and data journalists to better understand the electricity system and its impacts on climate. """ __pythonrequiredversion__ = "3.7" __projecturl__ = "https://catalyst.coop/pudl/" __downloadurl__ = "https://github.com/catalyst-cooperative/pudl/" logging.getLogger(__name__).addHandler(logging.NullHandler())
35.835821
76
0.82299
import logging

import pkg_resources

import pudl.analysis.mcoe
import pudl.cli
import pudl.constants
import pudl.convert.datapkg_to_sqlite
import pudl.convert.epacems_to_parquet
import pudl.convert.ferc1_to_sqlite
import pudl.convert.flatten_datapkgs
import pudl.etl
import pudl.extract.eia860
import pudl.extract.eia923
import pudl.extract.epacems
import pudl.extract.epaipm
import pudl.extract.ferc1
import pudl.glue.ferc1_eia
import pudl.helpers
import pudl.load.csv
import pudl.load.metadata
import pudl.output.eia860
import pudl.output.eia923
import pudl.output.ferc1
import pudl.output.glue
import pudl.output.pudltabl
import pudl.transform.eia
import pudl.transform.eia860
import pudl.transform.eia923
import pudl.transform.epacems
import pudl.transform.epaipm
import pudl.transform.ferc1
import pudl.validate
import pudl.workspace.datastore
import pudl.workspace.setup

__author__ = "Catalyst Cooperative"
__contact__ = "pudl@catalyst.coop"
__maintainer__ = "Catalyst Cooperative"
__license__ = "MIT License"
__maintainer_email__ = "zane.selvans@catalyst.coop"
__version__ = pkg_resources.get_distribution("catalystcoop.pudl").version
__docformat__ = "restructuredtext en"
__description__ = "Tools for liberating public US electric utility data."
__long_description__ = """
This Public Utility Data Liberation (PUDL) project is a collection of tools
that allow programmatic access to and manipulation of many public data sets
related to electric utilities in the United States. These data sets are often
collected by state and federal agencies, but are publicized in ways that are
not well standardized, or intended for interoperability. PUDL seeks to allow
more transparent and useful access to this important public data, with the
goal of enabling climate advocates, academic researchers, and data journalists
to better understand the electricity system and its impacts on climate.
"""
__pythonrequiredversion__ = "3.7"
__projecturl__ = "https://catalyst.coop/pudl/"
__downloadurl__ = "https://github.com/catalyst-cooperative/pudl/"

logging.getLogger(__name__).addHandler(logging.NullHandler())
true
true
f7246c03ec5401f98478d3072cffa65821a40e6d
5,510
py
Python
test/functional/p2p_disconnect_ban.py
Simple-Software-Solutions/RBX-Core
8cf0dfda708233e080e8729cec0b5014218386e3
[ "MIT" ]
null
null
null
test/functional/p2p_disconnect_ban.py
Simple-Software-Solutions/RBX-Core
8cf0dfda708233e080e8729cec0b5014218386e3
[ "MIT" ]
null
null
null
test/functional/p2p_disconnect_ban.py
Simple-Software-Solutions/RBX-Core
8cf0dfda708233e080e8729cec0b5014218386e3
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node disconnect and ban behavior""" import time from test_framework.test_framework import RbxTestFramework from test_framework.util import ( assert_equal, connect_nodes, assert_raises_rpc_error, wait_until, ) class DisconnectBanTest(RbxTestFramework): def set_test_params(self): self.num_nodes = 2 def run_test(self): self.log.info("Connect nodes both way") connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[1], 0) self.log.info("Test setban and listbanned RPCs") self.log.info("setban: successfully ban single IP address") assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point self.nodes[1].setban("127.0.0.1", "add") wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10) assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("clearbanned: successfully clear ban list") self.nodes[1].clearbanned() assert_equal(len(self.nodes[1].listbanned()), 0) self.nodes[1].setban("127.0.0.0/24", "add") self.log.info("setban: fail to ban an already banned subnet") assert_equal(len(self.nodes[1].listbanned()), 1) assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add") self.log.info("setban: fail to ban an invalid subnet") assert_raises_rpc_error(-23, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add") assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24 self.log.info("setban remove: fail to unban a non-banned subnet") assert_raises_rpc_error(-1, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove") assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("setban remove: successfully unban subnet") self.nodes[1].setban("127.0.0.0/24", "remove") assert_equal(len(self.nodes[1].listbanned()), 0) self.nodes[1].clearbanned() assert_equal(len(self.nodes[1].listbanned()), 0) self.log.info("setban: test persistence across node restart") self.nodes[1].setban("127.0.0.0/32", "add") self.nodes[1].setban("127.0.0.0/24", "add") # Set the mocktime so we can control when bans expire old_time = int(time.time()) self.nodes[1].setmocktime(old_time) self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1 seconds self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds listBeforeShutdown = self.nodes[1].listbanned() assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) # Move time forward by 3 seconds so the third ban has expired self.nodes[1].setmocktime(old_time + 3) assert_equal(len(self.nodes[1].listbanned()), 4) self.stop_node(1) self.start_node(1) listAfterShutdown = self.nodes[1].listbanned() assert_equal("127.0.0.0/24", listAfterShutdown[0]['address']) assert_equal("127.0.0.0/32", listAfterShutdown[1]['address']) assert_equal("/19" in listAfterShutdown[2]['address'], True) # Clear ban lists self.nodes[1].clearbanned() self.log.info("Connect nodes both way") connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[1], 0) self.log.info("Test disconnectnode RPCs") #self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid") #address1 = self.nodes[0].getpeerinfo()[0]['addr'] #node1 = 
self.nodes[0].getpeerinfo()[0]['addr'] #assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1) self.log.info("disconnectnode: fail to disconnect when calling with junk address") assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, "221B Baker Street") self.log.info("disconnectnode: successfully disconnect node by address") address1 = self.nodes[0].getpeerinfo()[0]['addr'] self.nodes[0].disconnectnode(address1) wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1] self.log.info("disconnectnode: successfully reconnect node") connect_nodes(self.nodes[0], 1) # reconnect the node assert_equal(len(self.nodes[0].getpeerinfo()), 2) assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1] #self.log.info("disconnectnode: successfully disconnect node by node id") #id1 = self.nodes[0].getpeerinfo()[0]['id'] #self.nodes[0].disconnectnode(nodeid=id1) #wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) #assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1] if __name__ == '__main__': DisconnectBanTest().main()
48.333333
156
0.659891
import time from test_framework.test_framework import RbxTestFramework from test_framework.util import ( assert_equal, connect_nodes, assert_raises_rpc_error, wait_until, ) class DisconnectBanTest(RbxTestFramework): def set_test_params(self): self.num_nodes = 2 def run_test(self): self.log.info("Connect nodes both way") connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[1], 0) self.log.info("Test setban and listbanned RPCs") self.log.info("setban: successfully ban single IP address") assert_equal(len(self.nodes[1].getpeerinfo()), 2) self.nodes[1].setban("127.0.0.1", "add") wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10) assert_equal(len(self.nodes[1].getpeerinfo()), 0) assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("clearbanned: successfully clear ban list") self.nodes[1].clearbanned() assert_equal(len(self.nodes[1].listbanned()), 0) self.nodes[1].setban("127.0.0.0/24", "add") self.log.info("setban: fail to ban an already banned subnet") assert_equal(len(self.nodes[1].listbanned()), 1) assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add") self.log.info("setban: fail to ban an invalid subnet") assert_raises_rpc_error(-23, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add") assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("setban remove: fail to unban a non-banned subnet") assert_raises_rpc_error(-1, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove") assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("setban remove: successfully unban subnet") self.nodes[1].setban("127.0.0.0/24", "remove") assert_equal(len(self.nodes[1].listbanned()), 0) self.nodes[1].clearbanned() assert_equal(len(self.nodes[1].listbanned()), 0) self.log.info("setban: test persistence across node restart") self.nodes[1].setban("127.0.0.0/32", "add") self.nodes[1].setban("127.0.0.0/24", "add") old_time = int(time.time()) self.nodes[1].setmocktime(old_time) self.nodes[1].setban("192.168.0.1", "add", 1) self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) listBeforeShutdown = self.nodes[1].listbanned() assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) self.nodes[1].setmocktime(old_time + 3) assert_equal(len(self.nodes[1].listbanned()), 4) self.stop_node(1) self.start_node(1) listAfterShutdown = self.nodes[1].listbanned() assert_equal("127.0.0.0/24", listAfterShutdown[0]['address']) assert_equal("127.0.0.0/32", listAfterShutdown[1]['address']) assert_equal("/19" in listAfterShutdown[2]['address'], True) self.nodes[1].clearbanned() self.log.info("Connect nodes both way") connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[1], 0) self.log.info("Test disconnectnode RPCs") self.log.info("disconnectnode: fail to disconnect when calling with junk address") assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, "221B Baker Street") self.log.info("disconnectnode: successfully disconnect node by address") address1 = self.nodes[0].getpeerinfo()[0]['addr'] self.nodes[0].disconnectnode(address1) wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10) assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1] self.log.info("disconnectnode: successfully reconnect node") connect_nodes(self.nodes[0], 1) assert_equal(len(self.nodes[0].getpeerinfo()), 2) assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1] if __name__ == '__main__': 
DisconnectBanTest().main()
true
true
f7246cc917719e28deb7b06b6817fd9d7b3f055b
1,758
py
Python
core/src/zeit/edit/browser/resources.py
rickdg/vivi
16134ac954bf8425646d4ad47bdd1f372e089355
[ "BSD-3-Clause" ]
5
2019-05-16T09:51:29.000Z
2021-05-31T09:30:03.000Z
core/src/zeit/edit/browser/resources.py
rickdg/vivi
16134ac954bf8425646d4ad47bdd1f372e089355
[ "BSD-3-Clause" ]
107
2019-05-24T12:19:02.000Z
2022-03-23T15:05:56.000Z
core/src/zeit/edit/browser/resources.py
rickdg/vivi
16134ac954bf8425646d4ad47bdd1f372e089355
[ "BSD-3-Clause" ]
3
2020-08-14T11:01:17.000Z
2022-01-08T17:32:19.000Z
from zeit.cms.browser.resources import SplitDirResource, Library
import zeit.cms.browser.resources
import zeit.find.browser.resources


lib_css = Library('zeit.edit', 'resources')
lib_js = Library('zeit.edit.js', 'js')

SplitDirResource('editor.css')

SplitDirResource('fold.js', depends=[zeit.cms.browser.resources.base])
SplitDirResource('json.js', depends=[zeit.cms.browser.resources.base])
SplitDirResource('edit.js', depends=[
    zeit.cms.browser.resources.base,
    zeit.cms.browser.resources.tab_js,
    zeit.find.browser.resources.find_js,
    json_js,  # noqa
    editor_css,  # noqa
])
SplitDirResource('context.js', depends=[
    zeit.cms.browser.resources.base, edit_js])  # noqa
SplitDirResource('drop.js', depends=[
    zeit.cms.browser.resources.base,
    zeit.cms.browser.resources.dnd_js,
    context_js])  # noqa
SplitDirResource('sortable.js', depends=[
    zeit.cms.browser.resources.base,
    context_js,  # noqa
    drop_js])  # noqa
SplitDirResource('inlineform.js', depends=[
    zeit.cms.browser.resources.base,
    zeit.cms.browser.resources.view_js,
    zeit.cms.browser.resources.form_js,
    edit_js,  # noqa
    editor_css,  # noqa
])
SplitDirResource('lightbox.js', depends=[
    zeit.cms.browser.resources.base,
    zeit.cms.browser.resources.lightbox_js,
    zeit.cms.browser.resources.tab_js,
    context_js,  # noqa
    edit_js,  # noqa
    editor_css,  # noqa
])
SplitDirResource('library.js', depends=[
    zeit.cms.browser.resources.base,
    zeit.cms.browser.resources.view_js,
    zeit.cms.browser.resources.tab_js,
    drop_js,  # noqa
    editor_css,  # noqa
])
SplitDirResource('undo.js', depends=[
    zeit.cms.browser.resources.base,
    zeit.cms.browser.resources.view_js,
    editor_css,  # noqa
])
27.046154
70
0.713879
from zeit.cms.browser.resources import SplitDirResource, Library
import zeit.cms.browser.resources
import zeit.find.browser.resources


lib_css = Library('zeit.edit', 'resources')
lib_js = Library('zeit.edit.js', 'js')

SplitDirResource('editor.css')

SplitDirResource('fold.js', depends=[zeit.cms.browser.resources.base])
SplitDirResource('json.js', depends=[zeit.cms.browser.resources.base])
SplitDirResource('edit.js', depends=[
    zeit.cms.browser.resources.base,
    zeit.cms.browser.resources.tab_js,
    zeit.find.browser.resources.find_js,
    json_js,
    editor_css,
])
SplitDirResource('context.js', depends=[
    zeit.cms.browser.resources.base, edit_js])
SplitDirResource('drop.js', depends=[
    zeit.cms.browser.resources.base,
    zeit.cms.browser.resources.dnd_js,
    context_js])
SplitDirResource('sortable.js', depends=[
    zeit.cms.browser.resources.base,
    context_js,
    drop_js])
SplitDirResource('inlineform.js', depends=[
    zeit.cms.browser.resources.base,
    zeit.cms.browser.resources.view_js,
    zeit.cms.browser.resources.form_js,
    edit_js,
    editor_css,
])
SplitDirResource('lightbox.js', depends=[
    zeit.cms.browser.resources.base,
    zeit.cms.browser.resources.lightbox_js,
    zeit.cms.browser.resources.tab_js,
    context_js,
    edit_js,
    editor_css,
])
SplitDirResource('library.js', depends=[
    zeit.cms.browser.resources.base,
    zeit.cms.browser.resources.view_js,
    zeit.cms.browser.resources.tab_js,
    drop_js,
    editor_css,
])
SplitDirResource('undo.js', depends=[
    zeit.cms.browser.resources.base,
    zeit.cms.browser.resources.view_js,
    editor_css,
])
true
true
f7246cd07aed951d4878742924bcbfb2fe5565c2
329
py
Python
factioncli/commands/credentials.py
joncave/CLI
e12113594574bd4ca112895c6df59d1ae1c2094f
[ "BSD-3-Clause" ]
null
null
null
factioncli/commands/credentials.py
joncave/CLI
e12113594574bd4ca112895c6df59d1ae1c2094f
[ "BSD-3-Clause" ]
null
null
null
factioncli/commands/credentials.py
joncave/CLI
e12113594574bd4ca112895c6df59d1ae1c2094f
[ "BSD-3-Clause" ]
null
null
null
from cliff.lister import Lister

from factioncli.processing.config import get_passwords


class Credentials(Lister):
    "Returns a list of the default credentials for this instance of Faction"

    def take_action(self, parsed_args):
        passwords = get_passwords()
        return ("Type", "Username", "Password"), passwords
29.909091
76
0.738602
from cliff.lister import Lister

from factioncli.processing.config import get_passwords


class Credentials(Lister):

    def take_action(self, parsed_args):
        passwords = get_passwords()
        return ("Type", "Username", "Password"), passwords
true
true
f7246fb1d3475e6a8f8ea2b2fd45ec7d3e10c62f
16,568
py
Python
venv/Lib/site-packages/scipy/sparse/linalg/tests/test_interface.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
353
2020-12-10T10:47:17.000Z
2022-03-31T23:08:29.000Z
venv/Lib/site-packages/scipy/sparse/linalg/tests/test_interface.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
80
2020-12-10T09:54:22.000Z
2022-03-30T22:08:45.000Z
venv/Lib/site-packages/scipy/sparse/linalg/tests/test_interface.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
63
2020-12-10T17:10:34.000Z
2022-03-28T16:27:07.000Z
"""Test functions for the sparse.linalg.interface module """ from functools import partial from itertools import product import operator import pytest from pytest import raises as assert_raises, warns from numpy.testing import assert_, assert_equal import numpy as np import scipy.sparse as sparse from scipy.sparse.linalg import interface from scipy.sparse.sputils import matrix class TestLinearOperator(object): def setup_method(self): self.A = np.array([[1,2,3], [4,5,6]]) self.B = np.array([[1,2], [3,4], [5,6]]) self.C = np.array([[1,2], [3,4]]) def test_matvec(self): def get_matvecs(A): return [{ 'shape': A.shape, 'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]), 'rmatvec': lambda x: np.dot(A.T.conj(), x).reshape(A.shape[1]) }, { 'shape': A.shape, 'matvec': lambda x: np.dot(A, x), 'rmatvec': lambda x: np.dot(A.T.conj(), x), 'rmatmat': lambda x: np.dot(A.T.conj(), x), 'matmat': lambda x: np.dot(A, x) }] for matvecs in get_matvecs(self.A): A = interface.LinearOperator(**matvecs) assert_(A.args == ()) assert_equal(A.matvec(np.array([1,2,3])), [14,32]) assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]]) assert_equal(A * np.array([1,2,3]), [14,32]) assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]]) assert_equal(A.dot(np.array([1,2,3])), [14,32]) assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]]) assert_equal(A.matvec(matrix([[1],[2],[3]])), [[14],[32]]) assert_equal(A * matrix([[1],[2],[3]]), [[14],[32]]) assert_equal(A.dot(matrix([[1],[2],[3]])), [[14],[32]]) assert_equal((2*A)*[1,1,1], [12,30]) assert_equal((2 * A).rmatvec([1, 1]), [10, 14, 18]) assert_equal((2*A).H.matvec([1,1]), [10, 14, 18]) assert_equal((2*A)*[[1],[1],[1]], [[12],[30]]) assert_equal((2 * A).matmat([[1], [1], [1]]), [[12], [30]]) assert_equal((A*2)*[1,1,1], [12,30]) assert_equal((A*2)*[[1],[1],[1]], [[12],[30]]) assert_equal((2j*A)*[1,1,1], [12j,30j]) assert_equal((A+A)*[1,1,1], [12, 30]) assert_equal((A + A).rmatvec([1, 1]), [10, 14, 18]) assert_equal((A+A).H.matvec([1,1]), [10, 14, 18]) assert_equal((A+A)*[[1],[1],[1]], [[12], [30]]) assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]]) assert_equal((-A)*[1,1,1], [-6,-15]) assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]]) assert_equal((A-A)*[1,1,1], [0,0]) assert_equal((A - A) * [[1], [1], [1]], [[0], [0]]) X = np.array([[1, 2], [3, 4]]) # A_asarray = np.array([[1, 2, 3], [4, 5, 6]]) assert_equal((2 * A).rmatmat(X), np.dot((2 * self.A).T, X)) assert_equal((A * 2).rmatmat(X), np.dot((self.A * 2).T, X)) assert_equal((2j * A).rmatmat(X), np.dot((2j * self.A).T.conj(), X)) assert_equal((A * 2j).rmatmat(X), np.dot((self.A * 2j).T.conj(), X)) assert_equal((A + A).rmatmat(X), np.dot((self.A + self.A).T, X)) assert_equal((A + 2j * A).rmatmat(X), np.dot((self.A + 2j * self.A).T.conj(), X)) assert_equal((-A).rmatmat(X), np.dot((-self.A).T, X)) assert_equal((A - A).rmatmat(X), np.dot((self.A - self.A).T, X)) assert_equal((2j * A).rmatmat(2j * X), np.dot((2j * self.A).T.conj(), 2j * X)) z = A+A assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A) z = 2*A assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2) assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray)) assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray)) assert_(isinstance(A * np.array([1,2,3]), np.ndarray)) assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray)) assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray)) assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray)) assert_(isinstance(A.matvec(matrix([[1],[2],[3]])), np.ndarray)) 
assert_(isinstance(A * matrix([[1],[2],[3]]), np.ndarray)) assert_(isinstance(A.dot(matrix([[1],[2],[3]])), np.ndarray)) assert_(isinstance(2*A, interface._ScaledLinearOperator)) assert_(isinstance(2j*A, interface._ScaledLinearOperator)) assert_(isinstance(A+A, interface._SumLinearOperator)) assert_(isinstance(-A, interface._ScaledLinearOperator)) assert_(isinstance(A-A, interface._SumLinearOperator)) assert_((2j*A).dtype == np.complex_) assert_raises(ValueError, A.matvec, np.array([1,2])) assert_raises(ValueError, A.matvec, np.array([1,2,3,4])) assert_raises(ValueError, A.matvec, np.array([[1],[2]])) assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]])) assert_raises(ValueError, lambda: A*A) assert_raises(ValueError, lambda: A**2) for matvecsA, matvecsB in product(get_matvecs(self.A), get_matvecs(self.B)): A = interface.LinearOperator(**matvecsA) B = interface.LinearOperator(**matvecsB) # AtimesB = np.array([[22, 28], [49, 64]]) AtimesB = self.A.dot(self.B) X = np.array([[1, 2], [3, 4]]) assert_equal((A * B).rmatmat(X), np.dot((AtimesB).T, X)) assert_equal((2j * A * B).rmatmat(X), np.dot((2j * AtimesB).T.conj(), X)) assert_equal((A*B)*[1,1], [50,113]) assert_equal((A*B)*[[1],[1]], [[50],[113]]) assert_equal((A*B).matmat([[1],[1]]), [[50],[113]]) assert_equal((A * B).rmatvec([1, 1]), [71, 92]) assert_equal((A * B).H.matvec([1, 1]), [71, 92]) assert_(isinstance(A*B, interface._ProductLinearOperator)) assert_raises(ValueError, lambda: A+B) assert_raises(ValueError, lambda: A**2) z = A*B assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B) for matvecsC in get_matvecs(self.C): C = interface.LinearOperator(**matvecsC) X = np.array([[1, 2], [3, 4]]) assert_equal(C.rmatmat(X), np.dot((self.C).T, X)) assert_equal((C**2).rmatmat(X), np.dot((np.dot(self.C, self.C)).T, X)) assert_equal((C**2)*[1,1], [17,37]) assert_equal((C**2).rmatvec([1, 1]), [22, 32]) assert_equal((C**2).H.matvec([1, 1]), [22, 32]) assert_equal((C**2).matmat([[1],[1]]), [[17],[37]]) assert_(isinstance(C**2, interface._PowerLinearOperator)) def test_matmul(self): D = {'shape': self.A.shape, 'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]), 'rmatvec': lambda x: np.dot(self.A.T.conj(), x).reshape(self.A.shape[1]), 'rmatmat': lambda x: np.dot(self.A.T.conj(), x), 'matmat': lambda x: np.dot(self.A, x)} A = interface.LinearOperator(**D) B = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) b = B[0] assert_equal(operator.matmul(A, b), A * b) assert_equal(operator.matmul(A, B), A * B) assert_raises(ValueError, operator.matmul, A, 2) assert_raises(ValueError, operator.matmul, 2, A) class TestAsLinearOperator(object): def setup_method(self): self.cases = [] def make_cases(original, dtype): cases = [] cases.append((matrix(original, dtype=dtype), original)) cases.append((np.array(original, dtype=dtype), original)) cases.append((sparse.csr_matrix(original, dtype=dtype), original)) # Test default implementations of _adjoint and _rmatvec, which # refer to each other. 
def mv(x, dtype): y = original.dot(x) if len(x.shape) == 2: y = y.reshape(-1, 1) return y def rmv(x, dtype): return original.T.conj().dot(x) class BaseMatlike(interface.LinearOperator): args = () def __init__(self, dtype): self.dtype = np.dtype(dtype) self.shape = original.shape def _matvec(self, x): return mv(x, self.dtype) class HasRmatvec(BaseMatlike): args = () def _rmatvec(self,x): return rmv(x, self.dtype) class HasAdjoint(BaseMatlike): args = () def _adjoint(self): shape = self.shape[1], self.shape[0] matvec = partial(rmv, dtype=self.dtype) rmatvec = partial(mv, dtype=self.dtype) return interface.LinearOperator(matvec=matvec, rmatvec=rmatvec, dtype=self.dtype, shape=shape) class HasRmatmat(HasRmatvec): def _matmat(self, x): return original.dot(x) def _rmatmat(self, x): return original.T.conj().dot(x) cases.append((HasRmatvec(dtype), original)) cases.append((HasAdjoint(dtype), original)) cases.append((HasRmatmat(dtype), original)) return cases original = np.array([[1,2,3], [4,5,6]]) self.cases += make_cases(original, np.int32) self.cases += make_cases(original, np.float32) self.cases += make_cases(original, np.float64) self.cases += [(interface.aslinearoperator(M).T, A.T) for M, A in make_cases(original.T, np.float64)] self.cases += [(interface.aslinearoperator(M).H, A.T.conj()) for M, A in make_cases(original.T, np.float64)] original = np.array([[1, 2j, 3j], [4j, 5j, 6]]) self.cases += make_cases(original, np.complex_) self.cases += [(interface.aslinearoperator(M).T, A.T) for M, A in make_cases(original.T, np.complex_)] self.cases += [(interface.aslinearoperator(M).H, A.T.conj()) for M, A in make_cases(original.T, np.complex_)] def test_basic(self): for M, A_array in self.cases: A = interface.aslinearoperator(M) M,N = A.shape xs = [np.array([1, 2, 3]), np.array([[1], [2], [3]])] ys = [np.array([1, 2]), np.array([[1], [2]])] if A.dtype == np.complex_: xs += [np.array([1, 2j, 3j]), np.array([[1], [2j], [3j]])] ys += [np.array([1, 2j]), np.array([[1], [2j]])] x2 = np.array([[1, 4], [2, 5], [3, 6]]) for x in xs: assert_equal(A.matvec(x), A_array.dot(x)) assert_equal(A * x, A_array.dot(x)) assert_equal(A.matmat(x2), A_array.dot(x2)) assert_equal(A * x2, A_array.dot(x2)) for y in ys: assert_equal(A.rmatvec(y), A_array.T.conj().dot(y)) assert_equal(A.T.matvec(y), A_array.T.dot(y)) assert_equal(A.H.matvec(y), A_array.T.conj().dot(y)) for y in ys: if y.ndim < 2: continue assert_equal(A.rmatmat(y), A_array.T.conj().dot(y)) assert_equal(A.T.matmat(y), A_array.T.dot(y)) assert_equal(A.H.matmat(y), A_array.T.conj().dot(y)) if hasattr(M,'dtype'): assert_equal(A.dtype, M.dtype) assert_(hasattr(A, 'args')) def test_dot(self): for M, A_array in self.cases: A = interface.aslinearoperator(M) M,N = A.shape x0 = np.array([1, 2, 3]) x1 = np.array([[1], [2], [3]]) x2 = np.array([[1, 4], [2, 5], [3, 6]]) assert_equal(A.dot(x0), A_array.dot(x0)) assert_equal(A.dot(x1), A_array.dot(x1)) assert_equal(A.dot(x2), A_array.dot(x2)) def test_repr(): A = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1) repr_A = repr(A) assert_('unspecified dtype' not in repr_A, repr_A) def test_identity(): ident = interface.IdentityOperator((3, 3)) assert_equal(ident * [1, 2, 3], [1, 2, 3]) assert_equal(ident.dot(np.arange(9).reshape(3, 3)).ravel(), np.arange(9)) assert_raises(ValueError, ident.matvec, [1, 2, 3, 4]) def test_attributes(): A = interface.aslinearoperator(np.arange(16).reshape(4, 4)) def always_four_ones(x): x = np.asarray(x) assert_(x.shape == (3,) or x.shape == (3, 1)) return np.ones(4) B = 
interface.LinearOperator(shape=(4, 3), matvec=always_four_ones) for op in [A, B, A * B, A.H, A + A, B + B, A**4]: assert_(hasattr(op, "dtype")) assert_(hasattr(op, "shape")) assert_(hasattr(op, "_matvec")) def matvec(x): """ Needed for test_pickle as local functions are not pickleable """ return np.zeros(3) def test_pickle(): import pickle for protocol in range(pickle.HIGHEST_PROTOCOL + 1): A = interface.LinearOperator((3, 3), matvec) s = pickle.dumps(A, protocol=protocol) B = pickle.loads(s) for k in A.__dict__: assert_equal(getattr(A, k), getattr(B, k)) def test_inheritance(): class Empty(interface.LinearOperator): pass with warns(RuntimeWarning, match="should implement at least"): assert_raises(TypeError, Empty) class Identity(interface.LinearOperator): def __init__(self, n): super(Identity, self).__init__(dtype=None, shape=(n, n)) def _matvec(self, x): return x id3 = Identity(3) assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3]) assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6]) class MatmatOnly(interface.LinearOperator): def __init__(self, A): super(MatmatOnly, self).__init__(A.dtype, A.shape) self.A = A def _matmat(self, x): return self.A.dot(x) mm = MatmatOnly(np.random.randn(5, 3)) assert_equal(mm.matvec(np.random.randn(3)).shape, (5,)) def test_dtypes_of_operator_sum(): # gh-6078 mat_complex = np.random.rand(2,2) + 1j * np.random.rand(2,2) mat_real = np.random.rand(2,2) complex_operator = interface.aslinearoperator(mat_complex) real_operator = interface.aslinearoperator(mat_real) sum_complex = complex_operator + complex_operator sum_real = real_operator + real_operator assert_equal(sum_real.dtype, np.float64) assert_equal(sum_complex.dtype, np.complex128) def test_no_double_init(): call_count = [0] def matvec(v): call_count[0] += 1 return v # It should call matvec exactly once (in order to determine the # operator dtype) interface.LinearOperator((2, 2), matvec=matvec) assert_equal(call_count[0], 1) def test_adjoint_conjugate(): X = np.array([[1j]]) A = interface.aslinearoperator(X) B = 1j * A Y = 1j * X v = np.array([1]) assert_equal(B.dot(v), Y.dot(v)) assert_equal(B.H.dot(v), Y.T.conj().dot(v)) def test_ndim(): X = np.array([[1]]) A = interface.aslinearoperator(X) assert_equal(A.ndim, 2) def test_transpose_noconjugate(): X = np.array([[1j]]) A = interface.aslinearoperator(X) B = 1j * A Y = 1j * X v = np.array([1]) assert_equal(B.dot(v), Y.dot(v)) assert_equal(B.T.dot(v), Y.T.dot(v))
36.736142
78
0.506639
from functools import partial from itertools import product import operator import pytest from pytest import raises as assert_raises, warns from numpy.testing import assert_, assert_equal import numpy as np import scipy.sparse as sparse from scipy.sparse.linalg import interface from scipy.sparse.sputils import matrix class TestLinearOperator(object): def setup_method(self): self.A = np.array([[1,2,3], [4,5,6]]) self.B = np.array([[1,2], [3,4], [5,6]]) self.C = np.array([[1,2], [3,4]]) def test_matvec(self): def get_matvecs(A): return [{ 'shape': A.shape, 'matvec': lambda x: np.dot(A, x).reshape(A.shape[0]), 'rmatvec': lambda x: np.dot(A.T.conj(), x).reshape(A.shape[1]) }, { 'shape': A.shape, 'matvec': lambda x: np.dot(A, x), 'rmatvec': lambda x: np.dot(A.T.conj(), x), 'rmatmat': lambda x: np.dot(A.T.conj(), x), 'matmat': lambda x: np.dot(A, x) }] for matvecs in get_matvecs(self.A): A = interface.LinearOperator(**matvecs) assert_(A.args == ()) assert_equal(A.matvec(np.array([1,2,3])), [14,32]) assert_equal(A.matvec(np.array([[1],[2],[3]])), [[14],[32]]) assert_equal(A * np.array([1,2,3]), [14,32]) assert_equal(A * np.array([[1],[2],[3]]), [[14],[32]]) assert_equal(A.dot(np.array([1,2,3])), [14,32]) assert_equal(A.dot(np.array([[1],[2],[3]])), [[14],[32]]) assert_equal(A.matvec(matrix([[1],[2],[3]])), [[14],[32]]) assert_equal(A * matrix([[1],[2],[3]]), [[14],[32]]) assert_equal(A.dot(matrix([[1],[2],[3]])), [[14],[32]]) assert_equal((2*A)*[1,1,1], [12,30]) assert_equal((2 * A).rmatvec([1, 1]), [10, 14, 18]) assert_equal((2*A).H.matvec([1,1]), [10, 14, 18]) assert_equal((2*A)*[[1],[1],[1]], [[12],[30]]) assert_equal((2 * A).matmat([[1], [1], [1]]), [[12], [30]]) assert_equal((A*2)*[1,1,1], [12,30]) assert_equal((A*2)*[[1],[1],[1]], [[12],[30]]) assert_equal((2j*A)*[1,1,1], [12j,30j]) assert_equal((A+A)*[1,1,1], [12, 30]) assert_equal((A + A).rmatvec([1, 1]), [10, 14, 18]) assert_equal((A+A).H.matvec([1,1]), [10, 14, 18]) assert_equal((A+A)*[[1],[1],[1]], [[12], [30]]) assert_equal((A+A).matmat([[1],[1],[1]]), [[12], [30]]) assert_equal((-A)*[1,1,1], [-6,-15]) assert_equal((-A)*[[1],[1],[1]], [[-6],[-15]]) assert_equal((A-A)*[1,1,1], [0,0]) assert_equal((A - A) * [[1], [1], [1]], [[0], [0]]) X = np.array([[1, 2], [3, 4]]) assert_equal((2 * A).rmatmat(X), np.dot((2 * self.A).T, X)) assert_equal((A * 2).rmatmat(X), np.dot((self.A * 2).T, X)) assert_equal((2j * A).rmatmat(X), np.dot((2j * self.A).T.conj(), X)) assert_equal((A * 2j).rmatmat(X), np.dot((self.A * 2j).T.conj(), X)) assert_equal((A + A).rmatmat(X), np.dot((self.A + self.A).T, X)) assert_equal((A + 2j * A).rmatmat(X), np.dot((self.A + 2j * self.A).T.conj(), X)) assert_equal((-A).rmatmat(X), np.dot((-self.A).T, X)) assert_equal((A - A).rmatmat(X), np.dot((self.A - self.A).T, X)) assert_equal((2j * A).rmatmat(2j * X), np.dot((2j * self.A).T.conj(), 2j * X)) z = A+A assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is A) z = 2*A assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] == 2) assert_(isinstance(A.matvec([1, 2, 3]), np.ndarray)) assert_(isinstance(A.matvec(np.array([[1],[2],[3]])), np.ndarray)) assert_(isinstance(A * np.array([1,2,3]), np.ndarray)) assert_(isinstance(A * np.array([[1],[2],[3]]), np.ndarray)) assert_(isinstance(A.dot(np.array([1,2,3])), np.ndarray)) assert_(isinstance(A.dot(np.array([[1],[2],[3]])), np.ndarray)) assert_(isinstance(A.matvec(matrix([[1],[2],[3]])), np.ndarray)) assert_(isinstance(A * matrix([[1],[2],[3]]), np.ndarray)) assert_(isinstance(A.dot(matrix([[1],[2],[3]])), np.ndarray)) 
assert_(isinstance(2*A, interface._ScaledLinearOperator)) assert_(isinstance(2j*A, interface._ScaledLinearOperator)) assert_(isinstance(A+A, interface._SumLinearOperator)) assert_(isinstance(-A, interface._ScaledLinearOperator)) assert_(isinstance(A-A, interface._SumLinearOperator)) assert_((2j*A).dtype == np.complex_) assert_raises(ValueError, A.matvec, np.array([1,2])) assert_raises(ValueError, A.matvec, np.array([1,2,3,4])) assert_raises(ValueError, A.matvec, np.array([[1],[2]])) assert_raises(ValueError, A.matvec, np.array([[1],[2],[3],[4]])) assert_raises(ValueError, lambda: A*A) assert_raises(ValueError, lambda: A**2) for matvecsA, matvecsB in product(get_matvecs(self.A), get_matvecs(self.B)): A = interface.LinearOperator(**matvecsA) B = interface.LinearOperator(**matvecsB) AtimesB = self.A.dot(self.B) X = np.array([[1, 2], [3, 4]]) assert_equal((A * B).rmatmat(X), np.dot((AtimesB).T, X)) assert_equal((2j * A * B).rmatmat(X), np.dot((2j * AtimesB).T.conj(), X)) assert_equal((A*B)*[1,1], [50,113]) assert_equal((A*B)*[[1],[1]], [[50],[113]]) assert_equal((A*B).matmat([[1],[1]]), [[50],[113]]) assert_equal((A * B).rmatvec([1, 1]), [71, 92]) assert_equal((A * B).H.matvec([1, 1]), [71, 92]) assert_(isinstance(A*B, interface._ProductLinearOperator)) assert_raises(ValueError, lambda: A+B) assert_raises(ValueError, lambda: A**2) z = A*B assert_(len(z.args) == 2 and z.args[0] is A and z.args[1] is B) for matvecsC in get_matvecs(self.C): C = interface.LinearOperator(**matvecsC) X = np.array([[1, 2], [3, 4]]) assert_equal(C.rmatmat(X), np.dot((self.C).T, X)) assert_equal((C**2).rmatmat(X), np.dot((np.dot(self.C, self.C)).T, X)) assert_equal((C**2)*[1,1], [17,37]) assert_equal((C**2).rmatvec([1, 1]), [22, 32]) assert_equal((C**2).H.matvec([1, 1]), [22, 32]) assert_equal((C**2).matmat([[1],[1]]), [[17],[37]]) assert_(isinstance(C**2, interface._PowerLinearOperator)) def test_matmul(self): D = {'shape': self.A.shape, 'matvec': lambda x: np.dot(self.A, x).reshape(self.A.shape[0]), 'rmatvec': lambda x: np.dot(self.A.T.conj(), x).reshape(self.A.shape[1]), 'rmatmat': lambda x: np.dot(self.A.T.conj(), x), 'matmat': lambda x: np.dot(self.A, x)} A = interface.LinearOperator(**D) B = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) b = B[0] assert_equal(operator.matmul(A, b), A * b) assert_equal(operator.matmul(A, B), A * B) assert_raises(ValueError, operator.matmul, A, 2) assert_raises(ValueError, operator.matmul, 2, A) class TestAsLinearOperator(object): def setup_method(self): self.cases = [] def make_cases(original, dtype): cases = [] cases.append((matrix(original, dtype=dtype), original)) cases.append((np.array(original, dtype=dtype), original)) cases.append((sparse.csr_matrix(original, dtype=dtype), original)) def mv(x, dtype): y = original.dot(x) if len(x.shape) == 2: y = y.reshape(-1, 1) return y def rmv(x, dtype): return original.T.conj().dot(x) class BaseMatlike(interface.LinearOperator): args = () def __init__(self, dtype): self.dtype = np.dtype(dtype) self.shape = original.shape def _matvec(self, x): return mv(x, self.dtype) class HasRmatvec(BaseMatlike): args = () def _rmatvec(self,x): return rmv(x, self.dtype) class HasAdjoint(BaseMatlike): args = () def _adjoint(self): shape = self.shape[1], self.shape[0] matvec = partial(rmv, dtype=self.dtype) rmatvec = partial(mv, dtype=self.dtype) return interface.LinearOperator(matvec=matvec, rmatvec=rmatvec, dtype=self.dtype, shape=shape) class HasRmatmat(HasRmatvec): def _matmat(self, x): return original.dot(x) def _rmatmat(self, x): return 
original.T.conj().dot(x) cases.append((HasRmatvec(dtype), original)) cases.append((HasAdjoint(dtype), original)) cases.append((HasRmatmat(dtype), original)) return cases original = np.array([[1,2,3], [4,5,6]]) self.cases += make_cases(original, np.int32) self.cases += make_cases(original, np.float32) self.cases += make_cases(original, np.float64) self.cases += [(interface.aslinearoperator(M).T, A.T) for M, A in make_cases(original.T, np.float64)] self.cases += [(interface.aslinearoperator(M).H, A.T.conj()) for M, A in make_cases(original.T, np.float64)] original = np.array([[1, 2j, 3j], [4j, 5j, 6]]) self.cases += make_cases(original, np.complex_) self.cases += [(interface.aslinearoperator(M).T, A.T) for M, A in make_cases(original.T, np.complex_)] self.cases += [(interface.aslinearoperator(M).H, A.T.conj()) for M, A in make_cases(original.T, np.complex_)] def test_basic(self): for M, A_array in self.cases: A = interface.aslinearoperator(M) M,N = A.shape xs = [np.array([1, 2, 3]), np.array([[1], [2], [3]])] ys = [np.array([1, 2]), np.array([[1], [2]])] if A.dtype == np.complex_: xs += [np.array([1, 2j, 3j]), np.array([[1], [2j], [3j]])] ys += [np.array([1, 2j]), np.array([[1], [2j]])] x2 = np.array([[1, 4], [2, 5], [3, 6]]) for x in xs: assert_equal(A.matvec(x), A_array.dot(x)) assert_equal(A * x, A_array.dot(x)) assert_equal(A.matmat(x2), A_array.dot(x2)) assert_equal(A * x2, A_array.dot(x2)) for y in ys: assert_equal(A.rmatvec(y), A_array.T.conj().dot(y)) assert_equal(A.T.matvec(y), A_array.T.dot(y)) assert_equal(A.H.matvec(y), A_array.T.conj().dot(y)) for y in ys: if y.ndim < 2: continue assert_equal(A.rmatmat(y), A_array.T.conj().dot(y)) assert_equal(A.T.matmat(y), A_array.T.dot(y)) assert_equal(A.H.matmat(y), A_array.T.conj().dot(y)) if hasattr(M,'dtype'): assert_equal(A.dtype, M.dtype) assert_(hasattr(A, 'args')) def test_dot(self): for M, A_array in self.cases: A = interface.aslinearoperator(M) M,N = A.shape x0 = np.array([1, 2, 3]) x1 = np.array([[1], [2], [3]]) x2 = np.array([[1, 4], [2, 5], [3, 6]]) assert_equal(A.dot(x0), A_array.dot(x0)) assert_equal(A.dot(x1), A_array.dot(x1)) assert_equal(A.dot(x2), A_array.dot(x2)) def test_repr(): A = interface.LinearOperator(shape=(1, 1), matvec=lambda x: 1) repr_A = repr(A) assert_('unspecified dtype' not in repr_A, repr_A) def test_identity(): ident = interface.IdentityOperator((3, 3)) assert_equal(ident * [1, 2, 3], [1, 2, 3]) assert_equal(ident.dot(np.arange(9).reshape(3, 3)).ravel(), np.arange(9)) assert_raises(ValueError, ident.matvec, [1, 2, 3, 4]) def test_attributes(): A = interface.aslinearoperator(np.arange(16).reshape(4, 4)) def always_four_ones(x): x = np.asarray(x) assert_(x.shape == (3,) or x.shape == (3, 1)) return np.ones(4) B = interface.LinearOperator(shape=(4, 3), matvec=always_four_ones) for op in [A, B, A * B, A.H, A + A, B + B, A**4]: assert_(hasattr(op, "dtype")) assert_(hasattr(op, "shape")) assert_(hasattr(op, "_matvec")) def matvec(x): return np.zeros(3) def test_pickle(): import pickle for protocol in range(pickle.HIGHEST_PROTOCOL + 1): A = interface.LinearOperator((3, 3), matvec) s = pickle.dumps(A, protocol=protocol) B = pickle.loads(s) for k in A.__dict__: assert_equal(getattr(A, k), getattr(B, k)) def test_inheritance(): class Empty(interface.LinearOperator): pass with warns(RuntimeWarning, match="should implement at least"): assert_raises(TypeError, Empty) class Identity(interface.LinearOperator): def __init__(self, n): super(Identity, self).__init__(dtype=None, shape=(n, n)) def _matvec(self, x): return x 
id3 = Identity(3) assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3]) assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6]) class MatmatOnly(interface.LinearOperator): def __init__(self, A): super(MatmatOnly, self).__init__(A.dtype, A.shape) self.A = A def _matmat(self, x): return self.A.dot(x) mm = MatmatOnly(np.random.randn(5, 3)) assert_equal(mm.matvec(np.random.randn(3)).shape, (5,)) def test_dtypes_of_operator_sum(): mat_complex = np.random.rand(2,2) + 1j * np.random.rand(2,2) mat_real = np.random.rand(2,2) complex_operator = interface.aslinearoperator(mat_complex) real_operator = interface.aslinearoperator(mat_real) sum_complex = complex_operator + complex_operator sum_real = real_operator + real_operator assert_equal(sum_real.dtype, np.float64) assert_equal(sum_complex.dtype, np.complex128) def test_no_double_init(): call_count = [0] def matvec(v): call_count[0] += 1 return v interface.LinearOperator((2, 2), matvec=matvec) assert_equal(call_count[0], 1) def test_adjoint_conjugate(): X = np.array([[1j]]) A = interface.aslinearoperator(X) B = 1j * A Y = 1j * X v = np.array([1]) assert_equal(B.dot(v), Y.dot(v)) assert_equal(B.H.dot(v), Y.T.conj().dot(v)) def test_ndim(): X = np.array([[1]]) A = interface.aslinearoperator(X) assert_equal(A.ndim, 2) def test_transpose_noconjugate(): X = np.array([[1j]]) A = interface.aslinearoperator(X) B = 1j * A Y = 1j * X v = np.array([1]) assert_equal(B.dot(v), Y.dot(v)) assert_equal(B.T.dot(v), Y.T.dot(v))
true
true
f7246fd41a52c4c852543dc3cdc9672346ec9dbe
6,333
py
Python
doc/source/conf.py
gaohao95/cffi
8d1a4ec54db0f3f0e18e4a68c2bdc7f32d0fdd8b
[ "MIT" ]
1
2017-01-05T00:59:03.000Z
2017-01-05T00:59:03.000Z
doc/source/conf.py
gaohao95/cffi
8d1a4ec54db0f3f0e18e4a68c2bdc7f32d0fdd8b
[ "MIT" ]
null
null
null
doc/source/conf.py
gaohao95/cffi
8d1a4ec54db0f3f0e18e4a68c2bdc7f32d0fdd8b
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # # CFFI documentation build configuration file, created by # sphinx-quickstart on Thu Jun 14 16:37:47 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'CFFI' copyright = u'2012-2015, Armin Rigo, Maciej Fijalkowski' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.9' # The full version, including alpha/beta/rc tags. release = '1.9.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. #html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'CFFIdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'CFFI.tex', u'CFFI Documentation', u'Armin Rigo, Maciej Fijalkowski', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True
32.476923
80
0.722722
import sys, os extensions = ['sphinx.ext.autodoc'] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' project = u'CFFI' copyright = u'2012-2015, Armin Rigo, Maciej Fijalkowski' # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.9' # The full version, including alpha/beta/rc tags. release = '1.9.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. #html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. 
#html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'CFFIdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'CFFI.tex', u'CFFI Documentation', u'Armin Rigo, Maciej Fijalkowski', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True
true
true
f72470edb12a542373c15f2c30a033702b46467a
1,325
py
Python
tests/test_erd.py
Datateer/erd-python
528b876bf5adf6114cd1e0aac6a2be14006fd2eb
[ "MIT" ]
1
2021-09-13T06:05:48.000Z
2021-09-13T06:05:48.000Z
tests/test_erd.py
Datateer/erd-python
528b876bf5adf6114cd1e0aac6a2be14006fd2eb
[ "MIT" ]
2
2021-01-22T11:20:34.000Z
2022-01-18T07:20:09.000Z
tests/test_erd.py
Datateer/erd-python
528b876bf5adf6114cd1e0aac6a2be14006fd2eb
[ "MIT" ]
null
null
null
#!/usr/bin/env python """Tests for `erd` package.""" import unittest from click.testing import CliRunner from erd import erd from erd import cli class TestErd(unittest.TestCase): """Tests for `erd` package.""" def setUp(self): """Set up test fixtures, if any.""" def tearDown(self): """Tear down test fixtures, if any.""" def test_000_something(self): """Test something.""" def test_command_line_interface(self): """Test the CLI.""" runner = CliRunner() result = runner.invoke(cli.main) assert result.exit_code == 0 assert 'erd.cli.main' in result.output help_result = runner.invoke(cli.main, ['--help']) assert help_result.exit_code == 0 assert '--help Show this message and exit.' in help_result.output @unittest.skip('not implemented yet') def test_input_filename_required(self): raise NotImplementedError() @unittest.skip('not implemented yet') def test_outputfilename_required(self): raise NotImplementedError() @unittest.skip('not implemented yet') def test_errors_if_input_file_not_found(self): raise NotImplementedError() @unittest.skip('not implemented yet') def test_produces_output_file(self): raise NotImplementedError()
25.980392
74
0.658113
import unittest from click.testing import CliRunner from erd import erd from erd import cli class TestErd(unittest.TestCase): def setUp(self): def tearDown(self): def test_000_something(self): def test_command_line_interface(self): runner = CliRunner() result = runner.invoke(cli.main) assert result.exit_code == 0 assert 'erd.cli.main' in result.output help_result = runner.invoke(cli.main, ['--help']) assert help_result.exit_code == 0 assert '--help Show this message and exit.' in help_result.output @unittest.skip('not implemented yet') def test_input_filename_required(self): raise NotImplementedError() @unittest.skip('not implemented yet') def test_outputfilename_required(self): raise NotImplementedError() @unittest.skip('not implemented yet') def test_errors_if_input_file_not_found(self): raise NotImplementedError() @unittest.skip('not implemented yet') def test_produces_output_file(self): raise NotImplementedError()
true
true
f7247100061d6774afb40c4bea380d62cdfa96b7
11,258
py
Python
modnet/matbench/benchmark.py
sparks-baird/modnet
2b4a88aa8a3323756b6daee52450569cddd0068b
[ "MIT" ]
32
2020-05-22T11:47:37.000Z
2022-03-28T12:55:40.000Z
modnet/matbench/benchmark.py
sparks-baird/modnet
2b4a88aa8a3323756b6daee52450569cddd0068b
[ "MIT" ]
43
2020-06-12T21:09:15.000Z
2022-03-28T21:05:43.000Z
modnet/matbench/benchmark.py
sparks-baird/modnet
2b4a88aa8a3323756b6daee52450569cddd0068b
[ "MIT" ]
22
2020-06-19T12:03:02.000Z
2022-01-17T16:13:38.000Z
import os from collections import defaultdict from traceback import print_exc from typing import List, Dict, Any, Optional, Tuple, Type import numpy as np from modnet.preprocessing import MODData from modnet.models import MODNetModel from modnet.utils import LOG from modnet.hyper_opt import FitGenetic MATBENCH_SEED = 18012019 def matbench_kfold_splits(data: MODData, n_splits=5, classification=False): """Return the pre-defined k-fold splits to use when reporting matbench results. Arguments: data: The featurized MODData. """ if classification: from sklearn.model_selection import StratifiedKFold as KFold else: from sklearn.model_selection import KFold kf = KFold(n_splits=n_splits, shuffle=True, random_state=MATBENCH_SEED) kf_splits = kf.split(data.df_featurized, y=data.df_targets) return kf_splits def matbench_benchmark( data: MODData, target: List[str], target_weights: Dict[str, float], fit_settings: Optional[Dict[str, Any]] = None, ga_settings: Optional[Dict[str, float]] = None, classification: bool = False, model_type: Type[MODNetModel] = MODNetModel, save_folds: bool = False, save_models: bool = False, hp_optimization: bool = True, hp_strategy: str = "fit_preset", inner_feat_selection: bool = True, use_precomputed_cross_nmi: bool = True, presets: Optional[List[dict]] = None, fast: bool = False, n_jobs: Optional[int] = None, nested: bool = False, **model_init_kwargs, ) -> dict: """Train and cross-validate a model against Matbench data splits, optionally performing hyperparameter optimisation. Arguments: data: The entire dataset as a `MODData`. target: The list of target names to train on. target_weights: The target weights to use for the `MODNetModel`. fit_settings: Any settings to pass to `model.fit(...)` directly (typically when not performing hyperparameter optimisation). classification: Whether all tasks are classification rather than regression. model_type: The type of the model to create and benchmark. save_folds: Whether to save dataframes with pre-processed fold data (e.g. feature selection). save_models: Whether to pickle all trained models according to their fold index and performance. hp_optimization: Whether to perform hyperparameter optimisation. hp_strategy: Which optimization strategy to choose. Use either \"fit_preset\" or \"ga\". inner_feat_selection: Whether to perform split-level feature selection or try to use pre-computed values. use_precomputed_cross_nmi: Whether to use the precomputed cross NMI from the Materials Project dataset, or recompute per fold. presets: Override the built-in hyperparameter grid with these presets. fast: Whether to perform debug training, i.e. reduced presets and epochs, for the fit_preset strategy. n_jobs: Try to parallelize the inner fit_preset over this number of processes. Maxes out at number_of_presets*nested_folds nested: Whether to perform nested CV for hyperparameter optimisation. **model_init_kwargs: Additional arguments to pass to the model on creation. Returns: A dictionary containing all the results from the training, broken down by model and by fold. """ if hp_optimization: if hp_strategy not in ["fit_preset", "ga"]: raise RuntimeError( f'{hp_strategy} not supported. Choose from "fit_preset" or "ga".'
) if fit_settings is None: fit_settings = {} if not fit_settings.get("n_feat"): nf = len(data.df_featurized.columns) fit_settings["n_feat"] = nf if not fit_settings.get("num_neurons"): # Pass dummy network fit_settings["num_neurons"] = [[4], [4], [4], [4]] if ga_settings is None: ga_settings = { "size_pop": 20, "num_generations": 10, "early_stopping": 4, "refit": False, } fold_data = [] results = defaultdict(list) for ind, (train, test) in enumerate( matbench_kfold_splits(data, classification=classification) ): train_data, test_data = data.split((train, test)) if inner_feat_selection: path = "folds/train_moddata_f{}".format(ind + 1) if os.path.isfile(path): train_data = MODData.load(path) else: train_data.feature_selection( n=-1, use_precomputed_cross_nmi=use_precomputed_cross_nmi, n_jobs=n_jobs, ) os.makedirs("folds", exist_ok=True) train_data.save(path) fold_data.append((train_data, test_data)) args = (target, target_weights, fit_settings, ga_settings) model_kwargs = { "model_type": model_type, "hp_optimization": hp_optimization, "fast": fast, "classification": classification, "save_folds": save_folds, "presets": presets, "hp_strategy": hp_strategy, "save_models": save_models, "nested": nested, "n_jobs": n_jobs, } model_kwargs.update(model_init_kwargs) fold_results = [] for fold in enumerate(fold_data): fold_results.append(train_fold(fold, *args, **model_kwargs)) for fold in fold_results: for key in fold: results[key].append(fold[key]) return results def train_fold( fold: Tuple[int, Tuple[MODData, MODData]], target: List[str], target_weights: Dict[str, float], fit_settings: Dict[str, Any], ga_settings: Dict[str, float], model_type: Type[MODNetModel] = MODNetModel, presets=None, hp_optimization=True, hp_strategy="fit_preset", classification=False, save_folds=False, fast=False, save_models=False, nested=False, n_jobs=None, **model_kwargs, ) -> dict: """Train one fold of a CV. Unless stated, all arguments have the same meaning as in `matbench_benchmark(...)`. Arguments: fold: A tuple containing the fold index, and another tuple of the training MODData and test MODData. Returns: A dictionary summarising the fold results. 
""" fold_ind, (train_data, test_data) = fold results = {} multi_target = bool(len(target) - 1) # If not performing hp_optimization, load model init settings from fit_settings model_settings = {} if not hp_optimization: model_settings = { "num_neurons": fit_settings["num_neurons"], "num_classes": fit_settings.get("num_classes"), "act": fit_settings.get("act"), "out_act": fit_settings.get("out_act", "linear"), "n_feat": fit_settings["n_feat"], } model_settings.update(model_kwargs) if classification: model_settings["num_classes"] = {t: 2 for t in target_weights} model = model_type(target, target_weights, **model_settings) if hp_optimization: if hp_strategy == "fit_preset": ( models, val_losses, best_learning_curve, learning_curves, best_presets, ) = model.fit_preset( train_data, presets=presets, fast=fast, classification=classification, nested=nested, n_jobs=n_jobs, ) results["nested_losses"] = val_losses results["nested_learning_curves"] = learning_curves results["best_learning_curves"] = best_learning_curve results["best_presets"] = best_presets elif hp_strategy == "ga": ga = FitGenetic(train_data) model = ga.run( size_pop=ga_settings["size_pop"], num_generations=ga_settings["num_generations"], nested=nested, n_jobs=n_jobs, early_stopping=ga_settings["early_stopping"], refit=ga_settings["refit"], fast=fast, ) if save_models: for ind, nested_model in enumerate(models): score = val_losses[ind] nested_model.save(f"results/nested_model_{fold_ind}_{ind}_{score:3.3f}") model.save(f"results/best_model_{fold_ind}_{score:3.3f}") else: if fit_settings["increase_bs"]: model.fit( train_data, lr=fit_settings["lr"], epochs=fit_settings["epochs"], batch_size=fit_settings["batch_size"], loss=fit_settings["loss"], ) model.fit( train_data, lr=fit_settings["lr"] / 7, epochs=fit_settings["epochs"] // 2, batch_size=fit_settings["batch_size"] * 2, loss=fit_settings["loss"], ) else: model.fit(train_data, **fit_settings) try: predict_kwargs = {} if classification: predict_kwargs["return_prob"] = True if model.can_return_uncertainty: predict_kwargs["return_unc"] = True pred_results = model.predict(test_data, **predict_kwargs) if isinstance(pred_results, tuple): predictions, stds = pred_results else: predictions = pred_results stds = None targets = test_data.df_targets if classification: from sklearn.metrics import roc_auc_score from sklearn.preprocessing import OneHotEncoder y_true = OneHotEncoder().fit_transform(targets.values).toarray() score = roc_auc_score(y_true, predictions.values) pred_bool = model.predict(test_data, return_prob=False) LOG.info(f"ROC-AUC: {score}") errors = targets - pred_bool elif multi_target: errors = targets - predictions score = np.mean(np.abs(errors.values), axis=0) else: errors = targets - predictions score = np.mean(np.abs(errors.values)) except Exception: print_exc() print("Something went wrong benchmarking this model.") predictions = None errors = None score = None if save_folds: opt_feat = train_data.optimal_features[: fit_settings["n_feat"]] df_train = train_data.df_featurized df_train = df_train[opt_feat] df_train.to_csv("folds/train_f{}.csv".format(ind + 1)) df_test = test_data.df_featurized df_test = df_test[opt_feat] errors.columns = [x + "_error" for x in errors.columns] df_test = df_test.join(errors) df_test.to_csv("folds/test_f{}.csv".format(ind + 1)) results["predictions"] = predictions if stds is not None: results["stds"] = stds results["targets"] = targets results["errors"] = errors results["scores"] = score results["model"] = model return results
34.012085
110
0.618671
import os from collections import defaultdict from traceback import print_exc from typing import List, Dict, Any, Optional, Tuple, Type import numpy as np from modnet.preprocessing import MODData from modnet.models import MODNetModel from modnet.utils import LOG from modnet.hyper_opt import FitGenetic MATBENCH_SEED = 18012019 def matbench_kfold_splits(data: MODData, n_splits=5, classification=False): if classification: from sklearn.model_selection import StratifiedKFold as KFold else: from sklearn.model_selection import KFold kf = KFold(n_splits=n_splits, shuffle=True, random_state=MATBENCH_SEED) kf_splits = kf.split(data.df_featurized, y=data.df_targets) return kf_splits def matbench_benchmark( data: MODData, target: List[str], target_weights: Dict[str, float], fit_settings: Optional[Dict[str, Any]] = None, ga_settings: Optional[Dict[str, float]] = None, classification: bool = False, model_type: Type[MODNetModel] = MODNetModel, save_folds: bool = False, save_models: bool = False, hp_optimization: bool = True, hp_strategy: str = "fit_preset", inner_feat_selection: bool = True, use_precomputed_cross_nmi: bool = True, presets: Optional[List[dict]] = None, fast: bool = False, n_jobs: Optional[int] = None, nested: bool = False, **model_init_kwargs, ) -> dict: if hp_optimization: if hp_strategy not in ["fit_preset", "ga"]: raise RuntimeError( f'{hp_strategy} not supported. Choose from "fit_genetic" or "ga".' ) if fit_settings is None: fit_settings = {} if not fit_settings.get("n_feat"): nf = len(data.df_featurized.columns) fit_settings["n_feat"] = nf if not fit_settings.get("num_neurons"): fit_settings["num_neurons"] = [[4], [4], [4], [4]] if ga_settings is None: ga_settings = { "size_pop": 20, "num_generations": 10, "early_stopping": 4, "refit": False, } fold_data = [] results = defaultdict(list) for ind, (train, test) in enumerate( matbench_kfold_splits(data, classification=classification) ): train_data, test_data = data.split((train, test)) if inner_feat_selection: path = "folds/train_moddata_f{}".format(ind + 1) if os.path.isfile(path): train_data = MODData.load(path) else: train_data.feature_selection( n=-1, use_precomputed_cross_nmi=use_precomputed_cross_nmi, n_jobs=n_jobs, ) os.makedirs("folds", exist_ok=True) train_data.save(path) fold_data.append((train_data, test_data)) args = (target, target_weights, fit_settings, ga_settings) model_kwargs = { "model_type": model_type, "hp_optimization": hp_optimization, "fast": fast, "classification": classification, "save_folds": save_folds, "presets": presets, "hp_strategy": hp_strategy, "save_models": save_models, "nested": nested, "n_jobs": n_jobs, } model_kwargs.update(model_init_kwargs) fold_results = [] for fold in enumerate(fold_data): fold_results.append(train_fold(fold, *args, **model_kwargs)) for fold in fold_results: for key in fold: results[key].append(fold[key]) return results def train_fold( fold: Tuple[int, Tuple[MODData, MODData]], target: List[str], target_weights: Dict[str, float], fit_settings: Dict[str, Any], ga_settings: Dict[str, float], model_type: Type[MODNetModel] = MODNetModel, presets=None, hp_optimization=True, hp_strategy="fit_preset", classification=False, save_folds=False, fast=False, save_models=False, nested=False, n_jobs=None, **model_kwargs, ) -> dict: fold_ind, (train_data, test_data) = fold results = {} multi_target = bool(len(target) - 1) model_settings = {} if not hp_optimization: model_settings = { "num_neurons": fit_settings["num_neurons"], "num_classes": fit_settings.get("num_classes"), "act": 
fit_settings.get("act"), "out_act": fit_settings.get("out_act", "linear"), "n_feat": fit_settings["n_feat"], } model_settings.update(model_kwargs) if classification: model_settings["num_classes"] = {t: 2 for t in target_weights} model = model_type(target, target_weights, **model_settings) if hp_optimization: if hp_strategy == "fit_preset": ( models, val_losses, best_learning_curve, learning_curves, best_presets, ) = model.fit_preset( train_data, presets=presets, fast=fast, classification=classification, nested=nested, n_jobs=n_jobs, ) results["nested_losses"] = val_losses results["nested_learning_curves"] = learning_curves results["best_learning_curves"] = best_learning_curve results["best_presets"] = best_presets elif hp_strategy == "ga": ga = FitGenetic(train_data) model = ga.run( size_pop=ga_settings["size_pop"], num_generations=ga_settings["num_generations"], nested=nested, n_jobs=n_jobs, early_stopping=ga_settings["early_stopping"], refit=ga_settings["refit"], fast=fast, ) if save_models: for ind, nested_model in enumerate(models): score = val_losses[ind] nested_model.save(f"results/nested_model_{fold_ind}_{ind}_{score:3.3f}") model.save(f"results/best_model_{fold_ind}_{score:3.3f}") else: if fit_settings["increase_bs"]: model.fit( train_data, lr=fit_settings["lr"], epochs=fit_settings["epochs"], batch_size=fit_settings["batch_size"], loss=fit_settings["loss"], ) model.fit( train_data, lr=fit_settings["lr"] / 7, epochs=fit_settings["epochs"] // 2, batch_size=fit_settings["batch_size"] * 2, loss=fit_settings["loss"], ) else: model.fit(train_data, **fit_settings) try: predict_kwargs = {} if classification: predict_kwargs["return_prob"] = True if model.can_return_uncertainty: predict_kwargs["return_unc"] = True pred_results = model.predict(test_data, **predict_kwargs) if isinstance(pred_results, tuple): predictions, stds = pred_results else: predictions = pred_results stds = None targets = test_data.df_targets if classification: from sklearn.metrics import roc_auc_score from sklearn.preprocessing import OneHotEncoder y_true = OneHotEncoder().fit_transform(targets.values).toarray() score = roc_auc_score(y_true, predictions.values) pred_bool = model.predict(test_data, return_prob=False) LOG.info(f"ROC-AUC: {score}") errors = targets - pred_bool elif multi_target: errors = targets - predictions score = np.mean(np.abs(errors.values), axis=0) else: errors = targets - predictions score = np.mean(np.abs(errors.values)) except Exception: print_exc() print("Something went wrong benchmarking this model.") predictions = None errors = None score = None if save_folds: opt_feat = train_data.optimal_features[: fit_settings["n_feat"]] df_train = train_data.df_featurized df_train = df_train[opt_feat] df_train.to_csv("folds/train_f{}.csv".format(ind + 1)) df_test = test_data.df_featurized df_test = df_test[opt_feat] errors.columns = [x + "_error" for x in errors.columns] df_test = df_test.join(errors) df_test.to_csv("folds/test_f{}.csv".format(ind + 1)) results["predictions"] = predictions if stds is not None: results["stds"] = stds results["targets"] = targets results["errors"] = errors results["scores"] = score results["model"] = model return results
true
true
f7247128248055fc8b3fc7e0f99d36f794357c24
5,958
py
Python
utils/evaluation.py
lippman1125/pytorch_FAN
ffc9c968478d55cb0c75c062bb8774923f961110
[ "BSD-3-Clause" ]
58
2019-03-14T20:13:10.000Z
2022-03-17T07:59:34.000Z
utils/evaluation.py
lippman1125/pytorch_FAN
ffc9c968478d55cb0c75c062bb8774923f961110
[ "BSD-3-Clause" ]
7
2019-03-29T05:13:39.000Z
2021-02-08T23:00:32.000Z
utils/evaluation.py
lippman1125/pytorch_FAN
ffc9c968478d55cb0c75c062bb8774923f961110
[ "BSD-3-Clause" ]
8
2019-05-29T09:05:32.000Z
2022-03-12T17:00:02.000Z
from __future__ import absolute_import, print_function import math import numpy as np import matplotlib.pyplot as plt from random import randint from .misc import * from .transforms import transform, transform_preds __all__ = ['accuracy', 'AverageMeter'] def get_preds(scores): ''' get predictions from score maps in torch Tensor return type: torch.LongTensor ''' assert scores.dim() == 4, 'Score maps should be 4-dim' # batch, chn, height, width ===> batch, chn, height*width # chn = 68 # height*width = score_map maxval, idx = torch.max(scores.view(scores.size(0), scores.size(1), -1), 2) maxval = maxval.view(scores.size(0), scores.size(1), 1) idx = idx.view(scores.size(0), scores.size(1), 1) + 1 preds = idx.repeat(1, 1, 2).float() # batchsize * numPoints * 2 # 0 is x coord # 1 is y coord # shape = batchsize, numPoints, 2 preds[:, :, 0] = (preds[:, :, 0] - 1) % scores.size(3) + 1 preds[:, :, 1] = torch.floor((preds[:, :, 1] - 1) / scores.size(2)) + 1 pred_mask = maxval.gt(0).repeat(1, 1, 2).float() preds *= pred_mask return preds def calc_dists(preds, target, normalize): preds = preds.float() target = target.float() # dists = 68 x batch dists = torch.zeros(preds.size(1), preds.size(0)) for n in range(preds.size(0)): for c in range(preds.size(1)): if target[n, c, 0] > 1 and target[n, c, 1] > 1: dists[c, n] = torch.dist(preds[n, c, :], target[n, c, :]) / normalize[n] else: dists[c, n] = -1 return dists def dist_acc(dists, thr=0.5): ''' Return percentage below threshold while ignoring values with a -1 ''' if dists.ne(-1).sum() > 0: return dists.le(thr).eq(dists.ne(-1)).sum() * 1.0 / dists.ne(-1).sum() else: return -1 def calc_metrics(dists, path='', category=''): errors = torch.mean(dists, 0).view(dists.size(1)) axes1 = np.linspace(0, 1, 1000) axes2 = np.zeros(1000) for i in range(1000): axes2[i] = float((errors < axes1[i]).sum()) / float(errors.size(0)) auc = round(np.sum(axes2[:70]) / .7, 2) if path: label = '{}({}) : {}'.format(path.split('/')[2], category, str(auc)) plt.xlim(0, 7) plt.ylim(0, 100) plt.yticks(np.arange(0, 110, 10)) plt.xticks(np.arange(0, 8, 1)) plt.grid() plt.title('NME (%)', fontsize=20) plt.xlabel('NME (%)', fontsize=16) plt.ylabel('Test images (%)', fontsize=16) if category: if category in ['Easy', 'Category A']: plt.plot(axes1 * 100, axes2 * 100, 'b-', label=label, lw=3) if category in ['Media', 'Category B']: plt.plot(axes1 * 100, axes2 * 100, 'r-', label=label, lw=3) if category in ['Hard', 'Category C']: plt.plot(axes1 * 100, axes2 * 100, 'g-', label=label, lw=3) else: plt.plot(axes1 * 100, axes2 * 100, 'b-', label=label, lw=3) plt.legend(loc=4, fontsize=12) plt.savefig(os.path.join(path + '/CED.eps')) return auc def _get_bboxsize(iterable): # iterable = 68 x 2 # torch.min return values, idxs mins = torch.min(iterable, 0)[0].view(2) maxs = torch.max(iterable, 0)[0].view(2) center = torch.FloatTensor((maxs[0] - (maxs[0] - mins[0]) / 2, maxs[1] - (maxs[1] - mins[1]) / 2)) # center[1] = center[1] - ((maxs[1] - mins[1]) * 0.12) return np.sqrt(abs(maxs[0] - mins[0]) * abs(maxs[1] - mins[1])) def accuracy(output, target, idxs, thr=0.08): ''' Calculate accuracy according to NME, but uses ground truth heatmap rather than x,y locations First value to be returned is accuracy calculated based on overall 'idxs' followed by individual accuracies ''' # preds = batch, 68, 64, 64 preds = get_preds(output) gts = get_preds(target) # B * 2 norm = torch.ones(preds.size(0)) # use face bbox to normalize for i, gt in enumerate(gts): norm[i] = _get_bboxsize(gt) dists = calc_dists(preds, gts, 
norm) acc = torch.zeros(len(idxs) + 1) avg_acc = 0 cnt = 0 mean_dists = torch.mean(dists, 0) acc[0] = mean_dists.le(thr).sum() * 1.0 / preds.size(0) # for i in range(len(idxs)): # acc[i+1] = dist_acc(dists[idxs[i]-1], thr=thr) # if acc[i+1] >= 0: # avg_acc = avg_acc + acc[i+1] # cnt += 1 # if cnt != 0: # acc[0] = avg_acc / cnt return acc, dists def final_preds(output, center, scale, res): if output.size(1) == 136: coords = output.view((output.size(0), 68, 2)) else: coords = get_preds(output) # float type # output shape is batch, 68, 64, 64 # coords shape is batch, 68, 2 # post-processing for n in range(coords.size(0)): for p in range(coords.size(1)): hm = output[n][p] px = int(math.floor(coords[n][p][0])) py = int(math.floor(coords[n][p][1])) if px > 1 and px < res[0] and py > 1 and py < res[1]: diff = torch.Tensor( [hm[py - 1][px] - hm[py - 1][px - 2], hm[py][px - 1] - hm[py - 2][px - 1]]) coords[n][p] += diff.sign() * .25 coords += 0.5 preds = coords.clone() # Transform back for i in range(coords.size(0)): preds[i] = transform_preds(coords[i], center[i], scale[i], res) if preds.dim() < 3: preds = preds.view(1, preds.size()) return preds class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count
31.193717
100
0.553206
from __future__ import absolute_import, print_function import math import numpy as np import matplotlib.pyplot as plt from random import randint from .misc import * from .transforms import transform, transform_preds __all__ = ['accuracy', 'AverageMeter'] def get_preds(scores): assert scores.dim() == 4, 'Score maps should be 4-dim' maxval, idx = torch.max(scores.view(scores.size(0), scores.size(1), -1), 2) maxval = maxval.view(scores.size(0), scores.size(1), 1) idx = idx.view(scores.size(0), scores.size(1), 1) + 1 preds = idx.repeat(1, 1, 2).float() preds[:, :, 0] = (preds[:, :, 0] - 1) % scores.size(3) + 1 preds[:, :, 1] = torch.floor((preds[:, :, 1] - 1) / scores.size(2)) + 1 pred_mask = maxval.gt(0).repeat(1, 1, 2).float() preds *= pred_mask return preds def calc_dists(preds, target, normalize): preds = preds.float() target = target.float() dists = torch.zeros(preds.size(1), preds.size(0)) for n in range(preds.size(0)): for c in range(preds.size(1)): if target[n, c, 0] > 1 and target[n, c, 1] > 1: dists[c, n] = torch.dist(preds[n, c, :], target[n, c, :]) / normalize[n] else: dists[c, n] = -1 return dists def dist_acc(dists, thr=0.5): if dists.ne(-1).sum() > 0: return dists.le(thr).eq(dists.ne(-1)).sum() * 1.0 / dists.ne(-1).sum() else: return -1 def calc_metrics(dists, path='', category=''): errors = torch.mean(dists, 0).view(dists.size(1)) axes1 = np.linspace(0, 1, 1000) axes2 = np.zeros(1000) for i in range(1000): axes2[i] = float((errors < axes1[i]).sum()) / float(errors.size(0)) auc = round(np.sum(axes2[:70]) / .7, 2) if path: label = '{}({}) : {}'.format(path.split('/')[2], category, str(auc)) plt.xlim(0, 7) plt.ylim(0, 100) plt.yticks(np.arange(0, 110, 10)) plt.xticks(np.arange(0, 8, 1)) plt.grid() plt.title('NME (%)', fontsize=20) plt.xlabel('NME (%)', fontsize=16) plt.ylabel('Test images (%)', fontsize=16) if category: if category in ['Easy', 'Category A']: plt.plot(axes1 * 100, axes2 * 100, 'b-', label=label, lw=3) if category in ['Media', 'Category B']: plt.plot(axes1 * 100, axes2 * 100, 'r-', label=label, lw=3) if category in ['Hard', 'Category C']: plt.plot(axes1 * 100, axes2 * 100, 'g-', label=label, lw=3) else: plt.plot(axes1 * 100, axes2 * 100, 'b-', label=label, lw=3) plt.legend(loc=4, fontsize=12) plt.savefig(os.path.join(path + '/CED.eps')) return auc def _get_bboxsize(iterable): mins = torch.min(iterable, 0)[0].view(2) maxs = torch.max(iterable, 0)[0].view(2) center = torch.FloatTensor((maxs[0] - (maxs[0] - mins[0]) / 2, maxs[1] - (maxs[1] - mins[1]) / 2)) return np.sqrt(abs(maxs[0] - mins[0]) * abs(maxs[1] - mins[1])) def accuracy(output, target, idxs, thr=0.08): preds = get_preds(output) gts = get_preds(target) norm = torch.ones(preds.size(0)) for i, gt in enumerate(gts): norm[i] = _get_bboxsize(gt) dists = calc_dists(preds, gts, norm) acc = torch.zeros(len(idxs) + 1) avg_acc = 0 cnt = 0 mean_dists = torch.mean(dists, 0) acc[0] = mean_dists.le(thr).sum() * 1.0 / preds.size(0) return acc, dists def final_preds(output, center, scale, res): if output.size(1) == 136: coords = output.view((output.szie(0), 68, 2)) else: coords = get_preds(output) for n in range(coords.size(0)): for p in range(coords.size(1)): hm = output[n][p] px = int(math.floor(coords[n][p][0])) py = int(math.floor(coords[n][p][1])) if px > 1 and px < res[0] and py > 1 and py < res[1]: diff = torch.Tensor( [hm[py - 1][px] - hm[py - 1][px - 2], hm[py][px - 1] - hm[py - 2][px - 1]]) coords[n][p] += diff.sign() * .25 coords += 0.5 preds = coords.clone() for i in range(coords.size(0)): preds[i] = 
transform_preds(coords[i], center[i], scale[i], res) if preds.dim() < 3: preds = preds.view(1, preds.size()) return preds class AverageMeter(object): def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count
true
true
f72471beb538717956885400bdf193fde9f2aea9
5,531
py
Python
api/routes/admin/admin_student_routes.py
NoisyBotDude/MIS-Backend
fa402b0a6d5d6862634b0ed55bc57178856c1eba
[ "MIT" ]
null
null
null
api/routes/admin/admin_student_routes.py
NoisyBotDude/MIS-Backend
fa402b0a6d5d6862634b0ed55bc57178856c1eba
[ "MIT" ]
null
null
null
api/routes/admin/admin_student_routes.py
NoisyBotDude/MIS-Backend
fa402b0a6d5d6862634b0ed55bc57178856c1eba
[ "MIT" ]
null
null
null
from fileinput import filename from urllib.request import Request from api.drivers import student from api.drivers.student import student_drivers from api.middlewares import authentication_middleware from api.schemas.admin.admin_request_schema import admin_request_schemas from api.schemas.student.request_schemas import student_request_schemas from api.schemas.student.response_schemas import student_response_schemas from api.utils.exceptions import exceptions from fastapi import APIRouter, Depends, HTTPException, Request, status from fastapi.responses import JSONResponse from api.repository import admin_repo from api.utils.save_student_data import save_data from starlette.responses import FileResponse import json def construct_router(): admin = APIRouter(tags=["Admin"]) @admin.post("/notify/student") async def notify_by_batch(): pass @admin.post("/add/student/subscription") async def add_student_subscription( request: admin_request_schemas.ManipulateStudentSubscriptionSchema, ): try: response = await student_drivers.Student().update_array_of_str( request.__dict__ ) return JSONResponse(status_code=200, content={"message": "info updated"}) except exceptions.DuplicateStudent: return JSONResponse( status_code=409, content={"message": "info cannot be updated"} ) except exceptions.UnexpectedError: return JSONResponse( status_code=500, content={"message": "internal server error"} ) @admin.post("/remove/student/subscription") async def remove_student_subscription( request: admin_request_schemas.ManipulateStudentSubscriptionSchema, ): try: response = await student_drivers.Student().delete_from_array_of_str( request.__dict__ ) if response: return JSONResponse( status_code=200, content={"message": "subscription deleted successfully"}, ) return JSONResponse( status_code=500, content={"message": "subscription deletion failed"} ) except exceptions.DuplicateStudent: return JSONResponse( status_code=409, content={"message": "info cannot be updated"} ) except exceptions.UnexpectedError: return JSONResponse( status_code=500, content={"message": "internal server error"} ) @admin.post("/verify/student") async def verify_student(request: Request): request = await request.json() response = await admin_repo.assign_otp(request["student_ids"]) if response: return JSONResponse( status_code=200, content={"message": "otp assigned successfully"} ) return JSONResponse( status_code=500, content={ "message": """otp cannot be assigned successfully for all student""" }, ) @admin.get("/ban/student/{student_id}") async def ban_student_account(student_id: str): response = await student_drivers.Student().ban_student(student_id) if response == "already_banned": return JSONResponse( status_code=404, content={"message": "student aleady banned"} ) elif response: return JSONResponse( status_code=200, content={"message": "student banned successfully"} ) return JSONResponse( status_code=500, content={"message": "internal server error"} ) @admin.delete("/delete/student/{student_id}") async def delete_student_account(student_id: str): response = await student_drivers.Student().delete_student(student_id) if response: return JSONResponse( status_code=200, content={"message": "student deleted successfully"} ) return JSONResponse( status_code=404, content={"message": "student does not exist"} ) @admin.get("/all_student") async def get_student_profile(): try: response = await ( student_drivers.Student().get_all_students() ) return JSONResponse( status_code=200, content=response ) except Exception as e: print(e, "exception") 
@admin.post("/student/data") async def get_student_data(): students = await ( student_drivers.Student().get_all_students_data() ) # print(students) save_data(students) if students: return JSONResponse( status_code=200, content= { "message" : "training details saved succesfully" } ) return JSONResponse( status_code=500, content= { "message" : "training details cannot be saved" } ) @admin.get("/student/data") async def get_student_data(request: Request): filename = "student_data.xls" #send excel file return FileResponse( filename, filename="student_data.xls", status_code=200, media_type="application/vnd.ms-excel" ) return admin
31.248588
85
0.605496
from fileinput import filename from urllib.request import Request from api.drivers import student from api.drivers.student import student_drivers from api.middlewares import authentication_middleware from api.schemas.admin.admin_request_schema import admin_request_schemas from api.schemas.student.request_schemas import student_request_schemas from api.schemas.student.response_schemas import student_response_schemas from api.utils.exceptions import exceptions from fastapi import APIRouter, Depends, HTTPException, Request, status from fastapi.responses import JSONResponse from api.repository import admin_repo from api.utils.save_student_data import save_data from starlette.responses import FileResponse import json def construct_router(): admin = APIRouter(tags=["Admin"]) @admin.post("/notify/student") async def notify_by_batch(): pass @admin.post("/add/student/subscription") async def add_student_subscription( request: admin_request_schemas.ManipulateStudentSubscriptionSchema, ): try: response = await student_drivers.Student().update_array_of_str( request.__dict__ ) return JSONResponse(status_code=200, content={"message": "info updated"}) except exceptions.DuplicateStudent: return JSONResponse( status_code=409, content={"message": "info cannot be updated"} ) except exceptions.UnexpectedError: return JSONResponse( status_code=500, content={"message": "internal server error"} ) @admin.post("/remove/student/subscription") async def remove_student_subscription( request: admin_request_schemas.ManipulateStudentSubscriptionSchema, ): try: response = await student_drivers.Student().delete_from_array_of_str( request.__dict__ ) if response: return JSONResponse( status_code=200, content={"message": "subscription deleted successfully"}, ) return JSONResponse( status_code=500, content={"message": "subscription deletion failed"} ) except exceptions.DuplicateStudent: return JSONResponse( status_code=409, content={"message": "info cannot be updated"} ) except exceptions.UnexpectedError: return JSONResponse( status_code=500, content={"message": "internal server error"} ) @admin.post("/verify/student") async def verify_student(request: Request): request = await request.json() response = await admin_repo.assign_otp(request["student_ids"]) if response: return JSONResponse( status_code=200, content={"message": "otp assigned successfully"} ) return JSONResponse( status_code=500, content={ "message": """otp cannot be assigned successfully for all student""" }, ) @admin.get("/ban/student/{student_id}") async def ban_student_account(student_id: str): response = await student_drivers.Student().ban_student(student_id) if response == "already_banned": return JSONResponse( status_code=404, content={"message": "student aleady banned"} ) elif response: return JSONResponse( status_code=200, content={"message": "student banned successfully"} ) return JSONResponse( status_code=500, content={"message": "internal server error"} ) @admin.delete("/delete/student/{student_id}") async def delete_student_account(student_id: str): response = await student_drivers.Student().delete_student(student_id) if response: return JSONResponse( status_code=200, content={"message": "student deleted successfully"} ) return JSONResponse( status_code=404, content={"message": "student does not exist"} ) @admin.get("/all_student") async def get_student_profile(): try: response = await ( student_drivers.Student().get_all_students() ) return JSONResponse( status_code=200, content=response ) except Exception as e: print(e, "exception") 
@admin.post("/student/data") async def get_student_data(): students = await ( student_drivers.Student().get_all_students_data() ) save_data(students) if students: return JSONResponse( status_code=200, content= { "message" : "training details saved succesfully" } ) return JSONResponse( status_code=500, content= { "message" : "training details cannot be saved" } ) @admin.get("/student/data") async def get_student_data(request: Request): filename = "student_data.xls" return FileResponse( filename, filename="student_data.xls", status_code=200, media_type="application/vnd.ms-excel" ) return admin
true
true
f724734a995b0486052f20cb0dc53813c6e312e3
6,915
py
Python
analysis/views/views_karyomapping.py
SACGF/variantgrid
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
[ "RSA-MD" ]
5
2021-01-14T03:34:42.000Z
2022-03-07T15:34:18.000Z
analysis/views/views_karyomapping.py
SACGF/variantgrid
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
[ "RSA-MD" ]
551
2020-10-19T00:02:38.000Z
2022-03-30T02:18:22.000Z
analysis/views/views_karyomapping.py
SACGF/variantgrid
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
[ "RSA-MD" ]
null
null
null
import csv from collections import defaultdict, OrderedDict from django.core.exceptions import PermissionDenied from django.http.response import StreamingHttpResponse from django.shortcuts import get_object_or_404, render, redirect from django.urls.base import reverse from django.views.decorators.cache import cache_page from django.views.decorators.vary import vary_on_cookie from analysis.forms import KaryomappingGeneForm, UserTrioForm from analysis.models.models_karyomapping import KaryomappingAnalysis, KaryotypeBins, \ KaryomappingGene from library.constants import DAY_SECS from library.django_utils import add_save_message from library.jqgrid_export import StashFile from patients.models_enums import Zygosity from snpdb.models import Trio from snpdb.models.models_variant import Variant def get_karyomapping_analysis_permission_check(request, pk): ka = get_object_or_404(KaryomappingAnalysis, pk=pk) if not request.user.has_perm(KaryomappingAnalysis.get_read_perm(), ka): msg = f"{request.user} does not have permission to access {ka}" raise PermissionDenied(msg) return ka def karyomapping_analyses(request): context = {"trio_form": UserTrioForm()} return render(request, 'analysis/karyomapping/karyomapping_analyses.html', context) def create_and_view_karyomapping_analysis_for_trio(trio, user): karyomapping = KaryomappingAnalysis.objects.create(user=user, name=trio.name, trio=trio) url = reverse("view_karyomapping_analysis", kwargs={"pk": karyomapping.pk}) return redirect(url) def create_karyomapping_analysis_for_trio_id(request, trio_id): trio = Trio.get_for_user(request.user, trio_id) return create_and_view_karyomapping_analysis_for_trio(trio, request.user) def view_karyomapping_analysis(request, pk): karyomapping_analysis = get_karyomapping_analysis_permission_check(request, pk) gene_form = KaryomappingGeneForm(request.POST or None, karyomapping_analysis=karyomapping_analysis, initial={"upstream_kb": 2000, "downstream_kb": 2000}) created_karyomapping_gene = None if request.method == "POST": valid = gene_form.is_valid() if valid: created_karyomapping_gene = gene_form.save() add_save_message(request, valid, "KaryomappingGene") context = {"karyomapping_analysis": karyomapping_analysis, "gene_form": gene_form, "has_write_permission": karyomapping_analysis.can_write(request.user), "created_karyomapping_gene": created_karyomapping_gene} return render(request, 'analysis/karyomapping/view_karyomapping_analysis.html', context) def get_variant_lookup_and_scatter_data(karyomapping_bins): """ Dumped to JS to be used by Plotly scatterplot karyomapping_bins : Have separate entries for ALT/REF, we merge these for output """ variant_id_lookup = {} data = defaultdict(lambda: defaultdict(list)) for karyotype_code, variant_data in karyomapping_bins.items(): x = [] text = [] for variant_id, chrom, position, ref, alt in variant_data: variant_string = Variant.format_tuple(chrom, position, ref, alt) variant_id_lookup[variant_string] = variant_id x.append(position) text.append(variant_string) collapsed_code = KaryotypeBins.COLLAPSED_BINS[karyotype_code] data[collapsed_code]["x"].extend(x) data[collapsed_code]["text"].extend(text) karyotype_bin_counts = OrderedDict() for kc in KaryotypeBins.KARYOTYPE_LABEL_ORDER: karyotype_bin_counts[kc] = len(data[kc]["x"]) return variant_id_lookup, data, karyotype_bin_counts @cache_page(DAY_SECS) # Only caching this for a day due to high amount of development @vary_on_cookie def view_karyomapping_gene(request, pk): karyomapping_gene = get_object_or_404(KaryomappingGene, pk=pk) # 
Permission check on parent karyomapping_analysis get_karyomapping_analysis_permission_check(request, karyomapping_gene.karyomapping_analysis.pk) iv, strand = karyomapping_gene.get_genomic_interval_and_strand() karyomapping_bins = karyomapping_gene.get_karyomapping_bins() variant_id_lookup, karyotype_bin_scatter_data, karyotype_bin_counts = get_variant_lookup_and_scatter_data(karyomapping_bins) context = {"kag": karyomapping_gene, "iv": iv, "strand": strand, "karyotype_bin_labels": KaryotypeBins.KARYOTYPE_LABEL_ORDER, "karyotype_bin_labels": KaryotypeBins.KARYOTYPE_LABEL_ORDER, # Want order "variant_id_lookup": variant_id_lookup, "karyotype_bin_scatter_data": karyotype_bin_scatter_data, "karyotype_bin_counts": karyotype_bin_counts} return render(request, 'analysis/karyomapping/view_karyomapping_gene.html', context) def download_karyomapping_gene_csv(request, pk): karyomapping_gene = get_object_or_404(KaryomappingGene, pk=pk) # Permission check on parent karyomapping_analysis get_karyomapping_analysis_permission_check(request, karyomapping_gene.karyomapping_analysis.pk) variant_and_genotypes = karyomapping_gene.get_variant_and_genotypes() filename = f"karyomapping_gene_{karyomapping_gene.pk}_{karyomapping_gene}.csv" # TODO: merge code w/library.jqgrid_export.grid_export_csv karotype_bin_lookup = KaryotypeBins.get_karotype_bin_lookup() header = ['chrom', 'position', 'ref', 'alt', 'proband_gt', 'father_gt', 'mother_gt', 'karyotype_bin'] pseudo_buffer = StashFile() writer = csv.DictWriter(pseudo_buffer, header, dialect='excel') def iter_row_writer(): writer.writeheader() yield pseudo_buffer.value for variant_data, genotype_tuple in variant_and_genotypes: _, chrom, position, ref, alt = variant_data proband_gt, father_gt, mother_gt = genotype_tuple try: karotype_bin = karotype_bin_lookup[proband_gt][father_gt][mother_gt] except: karotype_bin = '' row = {'chrom': chrom, 'position': position, 'ref': ref, 'alt': alt, 'proband_gt': Zygosity.get_genotype(proband_gt), 'father_gt': Zygosity.get_genotype(father_gt), 'mother_gt': Zygosity.get_genotype(mother_gt), 'karyotype_bin': karotype_bin} writer.writerow(row) yield pseudo_buffer.value response = StreamingHttpResponse(iter_row_writer(), content_type="text/csv") response['Content-Disposition'] = f'attachment; filename="{filename}"' return response
43.21875
128
0.707881
import csv from collections import defaultdict, OrderedDict from django.core.exceptions import PermissionDenied from django.http.response import StreamingHttpResponse from django.shortcuts import get_object_or_404, render, redirect from django.urls.base import reverse from django.views.decorators.cache import cache_page from django.views.decorators.vary import vary_on_cookie from analysis.forms import KaryomappingGeneForm, UserTrioForm from analysis.models.models_karyomapping import KaryomappingAnalysis, KaryotypeBins, \ KaryomappingGene from library.constants import DAY_SECS from library.django_utils import add_save_message from library.jqgrid_export import StashFile from patients.models_enums import Zygosity from snpdb.models import Trio from snpdb.models.models_variant import Variant def get_karyomapping_analysis_permission_check(request, pk): ka = get_object_or_404(KaryomappingAnalysis, pk=pk) if not request.user.has_perm(KaryomappingAnalysis.get_read_perm(), ka): msg = f"{request.user} does not have permission to access {ka}" raise PermissionDenied(msg) return ka def karyomapping_analyses(request): context = {"trio_form": UserTrioForm()} return render(request, 'analysis/karyomapping/karyomapping_analyses.html', context) def create_and_view_karyomapping_analysis_for_trio(trio, user): karyomapping = KaryomappingAnalysis.objects.create(user=user, name=trio.name, trio=trio) url = reverse("view_karyomapping_analysis", kwargs={"pk": karyomapping.pk}) return redirect(url) def create_karyomapping_analysis_for_trio_id(request, trio_id): trio = Trio.get_for_user(request.user, trio_id) return create_and_view_karyomapping_analysis_for_trio(trio, request.user) def view_karyomapping_analysis(request, pk): karyomapping_analysis = get_karyomapping_analysis_permission_check(request, pk) gene_form = KaryomappingGeneForm(request.POST or None, karyomapping_analysis=karyomapping_analysis, initial={"upstream_kb": 2000, "downstream_kb": 2000}) created_karyomapping_gene = None if request.method == "POST": valid = gene_form.is_valid() if valid: created_karyomapping_gene = gene_form.save() add_save_message(request, valid, "KaryomappingGene") context = {"karyomapping_analysis": karyomapping_analysis, "gene_form": gene_form, "has_write_permission": karyomapping_analysis.can_write(request.user), "created_karyomapping_gene": created_karyomapping_gene} return render(request, 'analysis/karyomapping/view_karyomapping_analysis.html', context) def get_variant_lookup_and_scatter_data(karyomapping_bins): variant_id_lookup = {} data = defaultdict(lambda: defaultdict(list)) for karyotype_code, variant_data in karyomapping_bins.items(): x = [] text = [] for variant_id, chrom, position, ref, alt in variant_data: variant_string = Variant.format_tuple(chrom, position, ref, alt) variant_id_lookup[variant_string] = variant_id x.append(position) text.append(variant_string) collapsed_code = KaryotypeBins.COLLAPSED_BINS[karyotype_code] data[collapsed_code]["x"].extend(x) data[collapsed_code]["text"].extend(text) karyotype_bin_counts = OrderedDict() for kc in KaryotypeBins.KARYOTYPE_LABEL_ORDER: karyotype_bin_counts[kc] = len(data[kc]["x"]) return variant_id_lookup, data, karyotype_bin_counts @cache_page(DAY_SECS) @vary_on_cookie def view_karyomapping_gene(request, pk): karyomapping_gene = get_object_or_404(KaryomappingGene, pk=pk) get_karyomapping_analysis_permission_check(request, karyomapping_gene.karyomapping_analysis.pk) iv, strand = karyomapping_gene.get_genomic_interval_and_strand() karyomapping_bins = 
karyomapping_gene.get_karyomapping_bins() variant_id_lookup, karyotype_bin_scatter_data, karyotype_bin_counts = get_variant_lookup_and_scatter_data(karyomapping_bins) context = {"kag": karyomapping_gene, "iv": iv, "strand": strand, "karyotype_bin_labels": KaryotypeBins.KARYOTYPE_LABEL_ORDER, "karyotype_bin_labels": KaryotypeBins.KARYOTYPE_LABEL_ORDER, "variant_id_lookup": variant_id_lookup, "karyotype_bin_scatter_data": karyotype_bin_scatter_data, "karyotype_bin_counts": karyotype_bin_counts} return render(request, 'analysis/karyomapping/view_karyomapping_gene.html', context) def download_karyomapping_gene_csv(request, pk): karyomapping_gene = get_object_or_404(KaryomappingGene, pk=pk) get_karyomapping_analysis_permission_check(request, karyomapping_gene.karyomapping_analysis.pk) variant_and_genotypes = karyomapping_gene.get_variant_and_genotypes() filename = f"karyomapping_gene_{karyomapping_gene.pk}_{karyomapping_gene}.csv" karotype_bin_lookup = KaryotypeBins.get_karotype_bin_lookup() header = ['chrom', 'position', 'ref', 'alt', 'proband_gt', 'father_gt', 'mother_gt', 'karyotype_bin'] pseudo_buffer = StashFile() writer = csv.DictWriter(pseudo_buffer, header, dialect='excel') def iter_row_writer(): writer.writeheader() yield pseudo_buffer.value for variant_data, genotype_tuple in variant_and_genotypes: _, chrom, position, ref, alt = variant_data proband_gt, father_gt, mother_gt = genotype_tuple try: karotype_bin = karotype_bin_lookup[proband_gt][father_gt][mother_gt] except: karotype_bin = '' row = {'chrom': chrom, 'position': position, 'ref': ref, 'alt': alt, 'proband_gt': Zygosity.get_genotype(proband_gt), 'father_gt': Zygosity.get_genotype(father_gt), 'mother_gt': Zygosity.get_genotype(mother_gt), 'karyotype_bin': karotype_bin} writer.writerow(row) yield pseudo_buffer.value response = StreamingHttpResponse(iter_row_writer(), content_type="text/csv") response['Content-Disposition'] = f'attachment; filename="{filename}"' return response
true
true
f724736b6941b37c7e6fc68854f6f1512721115e
1,762
py
Python
CondTools/IntegrationTest/python/validate_dt_devdb10_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
CondTools/IntegrationTest/python/validate_dt_devdb10_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
CondTools/IntegrationTest/python/validate_dt_devdb10_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
# The following comments couldn't be translated into the new config version: # Configuration file for EventSetupTest_t import FWCore.ParameterSet.Config as cms process = cms.Process("TEST") process.PoolDBESSource = cms.ESSource("PoolDBESSource", loadAll = cms.bool(True), toGet = cms.VPSet(cms.PSet( record = cms.string('DTT0Rcd'), tag = cms.string('MTCC_t0') ), cms.PSet( record = cms.string('DTTtrigRcd'), tag = cms.string('MTCC_tTrig') ), cms.PSet( record = cms.string('DTReadOutMappingRcd'), tag = cms.string('MTCC_map') )), messagelevel = cms.untracked.uint32(2), catalog = cms.untracked.string('relationalcatalog_oracle://devdb10/CMS_COND_GENERAL'), ##devdb10/CMS_COND_GENERAL" timetype = cms.string('runnumber'), connect = cms.string('oracle://devdb10/CMS_COND_DT'), ##devdb10/CMS_COND_DT" authenticationMethod = cms.untracked.uint32(1) ) process.source = cms.Source("EmptySource", maxEvents = cms.untracked.int32(5), numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.get = cms.EDAnalyzer("EventSetupRecordDataGetter", toGet = cms.VPSet(cms.PSet( record = cms.string('DTT0Rcd'), data = cms.vstring('DTT0') ), cms.PSet( record = cms.string('DTTtrigRcd'), data = cms.vstring('DTTtrig') ), cms.PSet( record = cms.string('DTReadOutMappingRcd'), data = cms.vstring('DTReadOutMapping') )), verbose = cms.untracked.bool(True) ) process.printer = cms.OutputModule("AsciiOutputModule") process.p = cms.Path(process.get) process.ep = cms.EndPath(process.printer)
30.37931
118
0.641317
# Configuration file for EventSetupTest_t import FWCore.ParameterSet.Config as cms process = cms.Process("TEST") process.PoolDBESSource = cms.ESSource("PoolDBESSource", loadAll = cms.bool(True), toGet = cms.VPSet(cms.PSet( record = cms.string('DTT0Rcd'), tag = cms.string('MTCC_t0') ), cms.PSet( record = cms.string('DTTtrigRcd'), tag = cms.string('MTCC_tTrig') ), cms.PSet( record = cms.string('DTReadOutMappingRcd'), tag = cms.string('MTCC_map') )), messagelevel = cms.untracked.uint32(2), catalog = cms.untracked.string('relationalcatalog_oracle://devdb10/CMS_COND_GENERAL'), ##devdb10/CMS_COND_GENERAL" timetype = cms.string('runnumber'), connect = cms.string('oracle://devdb10/CMS_COND_DT'), ##devdb10/CMS_COND_DT" authenticationMethod = cms.untracked.uint32(1) ) process.source = cms.Source("EmptySource", maxEvents = cms.untracked.int32(5), numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.get = cms.EDAnalyzer("EventSetupRecordDataGetter", toGet = cms.VPSet(cms.PSet( record = cms.string('DTT0Rcd'), data = cms.vstring('DTT0') ), cms.PSet( record = cms.string('DTTtrigRcd'), data = cms.vstring('DTTtrig') ), cms.PSet( record = cms.string('DTReadOutMappingRcd'), data = cms.vstring('DTReadOutMapping') )), verbose = cms.untracked.bool(True) ) process.printer = cms.OutputModule("AsciiOutputModule") process.p = cms.Path(process.get) process.ep = cms.EndPath(process.printer)
true
true
f724739e7899ac5f3fc808e74a2261f63bf6be67
2,010
py
Python
3_text/apache_log_parser_split.py
lluxury/P_U_S_A
1eb9d1fef74f9ce3618ae950f5223f598510be84
[ "MIT" ]
null
null
null
3_text/apache_log_parser_split.py
lluxury/P_U_S_A
1eb9d1fef74f9ce3618ae950f5223f598510be84
[ "MIT" ]
null
null
null
3_text/apache_log_parser_split.py
lluxury/P_U_S_A
1eb9d1fef74f9ce3618ae950f5223f598510be84
[ "MIT" ]
null
null
null
#!/usr/bin/env python """ USAGE: apache_log_parser_split.py some_log_file This script takes one command line argument: the name of a log file to parse. It then parses the log file and generates a report which associates remote hosts with number of bytes transferred to them. """ import sys def dictify_logline(line): '''return a dictionary of the pertinent pieces of an apache combined log file Currently, the only fields we are interested in are remote host and bytes sent, but we are putting status in there just for good measure. ''' split_line = line.split() return {'remote_host': split_line[0], 'status': split_line[8], 'bytes_sent': split_line[9], } def generate_log_report(logfile): '''return a dictionary of format remote_host=>[list of bytes sent] This function takes a file object, iterates through all the lines in the file, and generates a report of the number of bytes transferred to each remote host for each hit on the webserver. ''' report_dict = {} for line in logfile: line_dict = dictify_logline(line) # print (line_dict) try: bytes_sent = int(line_dict['bytes_sent']) except ValueError: ##totally disregard anything we don't understand continue report_dict.setdefault(line_dict['remote_host'], []).append(bytes_sent) return report_dict if __name__ == "__main__": if not len(sys.argv) > 1: print (__doc__) sys.exit(1) infile_name = sys.argv[1] try: infile = open(infile_name, 'r') except IOError: print ("You must specify a valid file to parse") print (__doc__) sys.exit(1) log_report = generate_log_report(infile) # print (log_report) infile.close() #__main__ does minimal processing: it checks the arguments, tries to open the file, and passes it to generate_log_report() #generate_log_report() builds a dictionary, iterates over every line of the log, and passes each line to dictify_logline() #checks that bytes_sent is an integer, updates the dictionary with the data returned by dictify_logline(), and finally returns the dictionary to __main__
30.454545
83
0.676617
import sys def dictify_logline(line): split_line = line.split() return {'remote_host': split_line[0], 'status': split_line[8], 'bytes_sent': split_line[9], } def generate_log_report(logfile): report_dict = {} for line in logfile: line_dict = dictify_logline(line) print (line_dict) try: bytes_sent = int(line_dict['bytes_sent']) except ValueError: continue report_dict.setdefault(line_dict['remote_host'], []).append(bytes_sent) return report_dict if __name__ == "__main__": if not len(sys.argv) > 1: print (__doc__) sys.exit(1) infile_name = sys.argv[1] try: infile = open(infile_name, 'r') except IOError: print ("You must specify a valid file to parse") print (__doc__) sys.exit(1) log_report = generate_log_report(infile) # print (log_report) infile.close() #__main__ does minimal processing: it checks the arguments, tries to open the file, and passes it to generate_log_report() #generate_log_report() builds a dictionary, iterates over every line of the log, and passes each line to dictify_logline() #checks that bytes_sent is an integer, updates the dictionary with the data returned by dictify_logline(), and finally returns the dictionary to __main__
true
true
f724743cde7860172ea7e6028c7280410480dec1
7,225
py
Python
thx_bot/integration_conversations.py
thxprotocol/telegram-bot
d8c77060740f76ed50aab93cb7c6ca83567d1710
[ "Apache-2.0" ]
null
null
null
thx_bot/integration_conversations.py
thxprotocol/telegram-bot
d8c77060740f76ed50aab93cb7c6ca83567d1710
[ "Apache-2.0" ]
null
null
null
thx_bot/integration_conversations.py
thxprotocol/telegram-bot
d8c77060740f76ed50aab93cb7c6ca83567d1710
[ "Apache-2.0" ]
3
2021-10-31T19:20:39.000Z
2021-12-04T03:35:23.000Z
from telegram.ext import CommandHandler from telegram.ext import ConversationHandler from telegram.ext import Filters from telegram.ext import MessageHandler from thx_bot.commands import CHOOSING from thx_bot.commands import CHOOSING_ADD_MEMBER from thx_bot.commands import CHOOSING_REWARDS from thx_bot.commands import CHOOSING_SIGNUP from thx_bot.commands import CHOOSING_TOKENS from thx_bot.commands import CHOOSING_WALLET_UPDATE from thx_bot.commands import TYPING_REPLY from thx_bot.commands import TYPING_REPLY_MEMBER from thx_bot.commands import TYPING_REPLY_SIGNUP from thx_bot.commands import TYPING_REPLY_WALLET_UPDATE from thx_bot.commands import TYPING_REWARD_REPLY from thx_bot.commands import TYPING_TOKENS_REPLY from thx_bot.commands.add_member import start_adding_member from thx_bot.commands.add_member import done_member_add from thx_bot.commands.add_member import received_information_member_add from thx_bot.commands.add_member import regular_choice_member_add from thx_bot.commands.create_wallet import done_signup from thx_bot.commands.create_wallet import received_information_signup from thx_bot.commands.create_wallet import regular_choice_signup from thx_bot.commands.create_wallet import start_creating_wallet from thx_bot.commands.entrance import disable_entrance_checks from thx_bot.commands.entrance import done_permission from thx_bot.commands.entrance import permissions_entrypoint from thx_bot.commands.entrance import received_permission_amount from thx_bot.commands.entrance import regular_choice_permissions from thx_bot.commands.entrance import show_entrance_permision_for_channel from thx_bot.commands.entrance import toggle_users_with_rewards from thx_bot.commands.pool_rewards import done_rewards from thx_bot.commands.pool_rewards import pool_show_rewards_command from thx_bot.commands.pool_rewards import received_information_reward from thx_bot.commands.pool_rewards import regular_choice_reward from thx_bot.commands.pool_rewards import rewards_entrypoint from thx_bot.commands.register_channel import check_connection_channel from thx_bot.commands.register_channel import done_channel from thx_bot.commands.register_channel import received_information_channel from thx_bot.commands.register_channel import regular_choice_channel from thx_bot.commands.register_channel import start_setting_channel from thx_bot.commands.update_wallet import done_wallet_update from thx_bot.commands.update_wallet import received_information_wallet_update from thx_bot.commands.update_wallet import regular_choice_wallet_update from thx_bot.commands.update_wallet import start_updating_wallet register_channel_conversation = ConversationHandler( entry_points=[CommandHandler('register_channel', start_setting_channel)], # noqa states={ # noqa CHOOSING: [ MessageHandler( Filters.regex('^(Client id|Client secret|Pool address)$'), regular_choice_channel ), ], TYPING_REPLY: [ MessageHandler( Filters.text & ~(Filters.command | Filters.regex('^Done$')), received_information_channel, ) ], }, fallbacks=[ # noqa MessageHandler(Filters.regex('^Done$'), done_channel), MessageHandler(Filters.regex('^Test Connection$'), check_connection_channel), ], name="register_channel", persistent=False, ) create_wallet_conversation = ConversationHandler( entry_points=[CommandHandler('create_wallet', start_creating_wallet)], # noqa states={ # noqa CHOOSING_SIGNUP: [ MessageHandler( Filters.regex('^(Email|Password)$'), regular_choice_signup ), ], TYPING_REPLY_SIGNUP: [ MessageHandler( Filters.text & ~(Filters.command | 
Filters.regex('^Done$')), received_information_signup, ) ], }, fallbacks=[ # noqa MessageHandler(Filters.regex('^Done$'), done_signup), ], # noqa name="create_wallet", persistent=False, ) update_wallet_conversation = ConversationHandler( entry_points=[CommandHandler('update_wallet', start_updating_wallet)], # noqa states={ # noqa CHOOSING_WALLET_UPDATE: [ MessageHandler( Filters.regex('^Wallet Update$'), regular_choice_wallet_update ), ], TYPING_REPLY_WALLET_UPDATE: [ MessageHandler( Filters.text & ~(Filters.command | Filters.regex('^Done$')), received_information_wallet_update, ) ], }, fallbacks=[ # noqa MessageHandler(Filters.regex('^Done$'), done_wallet_update), ], # noqa name="update_wallet", persistent=False, ) rewards_conversation = ConversationHandler( entry_points=[CommandHandler('rewards', rewards_entrypoint)], # noqa states={ # noqa CHOOSING_REWARDS: [ MessageHandler( Filters.regex('^Set Reward$'), regular_choice_reward ), ], TYPING_REWARD_REPLY: [ MessageHandler( Filters.text & ~(Filters.command | Filters.regex('^Done$')), received_information_reward, ) ], }, fallbacks=[ # noqa MessageHandler(Filters.regex('^Done$'), done_rewards), MessageHandler(Filters.regex('^Show rewards$'), pool_show_rewards_command), ], # noqa name="rewards", persistent=False, ) entrance_tokens_conversation = ConversationHandler( entry_points=[CommandHandler('entrance', permissions_entrypoint)], states={ # noqa CHOOSING_TOKENS: [ MessageHandler( Filters.regex('^Set entrance amount$'), regular_choice_permissions ), ], TYPING_TOKENS_REPLY: [ MessageHandler( Filters.text & ~(Filters.command | Filters.regex('^Done$')), received_permission_amount, ) ], }, fallbacks=[ # noqa MessageHandler(Filters.regex('^Done$'), done_permission), MessageHandler( Filters.regex('^Show entrance configuration$'), show_entrance_permision_for_channel), MessageHandler( Filters.regex('^Disable entrance checks$'), disable_entrance_checks), MessageHandler( Filters.regex('^Toggle only users with rewards$'), toggle_users_with_rewards), ], # noqa name="entrance", persistent=False, ) add_member_conversation = ConversationHandler( entry_points=[CommandHandler('add_me_to_pool', start_adding_member)], # noqa states={ # noqa CHOOSING_ADD_MEMBER: [ MessageHandler( Filters.regex('^Add your wallet$'), regular_choice_member_add ), ], TYPING_REPLY_MEMBER: [ MessageHandler( Filters.text & ~(Filters.command | Filters.regex('^Done$')), received_information_member_add, ) ], }, fallbacks=[ # noqa MessageHandler(Filters.regex('^Done$'), done_member_add), ], # noqa name="add_member", persistent=False, )
37.827225
97
0.701315
from telegram.ext import CommandHandler from telegram.ext import ConversationHandler from telegram.ext import Filters from telegram.ext import MessageHandler from thx_bot.commands import CHOOSING from thx_bot.commands import CHOOSING_ADD_MEMBER from thx_bot.commands import CHOOSING_REWARDS from thx_bot.commands import CHOOSING_SIGNUP from thx_bot.commands import CHOOSING_TOKENS from thx_bot.commands import CHOOSING_WALLET_UPDATE from thx_bot.commands import TYPING_REPLY from thx_bot.commands import TYPING_REPLY_MEMBER from thx_bot.commands import TYPING_REPLY_SIGNUP from thx_bot.commands import TYPING_REPLY_WALLET_UPDATE from thx_bot.commands import TYPING_REWARD_REPLY from thx_bot.commands import TYPING_TOKENS_REPLY from thx_bot.commands.add_member import start_adding_member from thx_bot.commands.add_member import done_member_add from thx_bot.commands.add_member import received_information_member_add from thx_bot.commands.add_member import regular_choice_member_add from thx_bot.commands.create_wallet import done_signup from thx_bot.commands.create_wallet import received_information_signup from thx_bot.commands.create_wallet import regular_choice_signup from thx_bot.commands.create_wallet import start_creating_wallet from thx_bot.commands.entrance import disable_entrance_checks from thx_bot.commands.entrance import done_permission from thx_bot.commands.entrance import permissions_entrypoint from thx_bot.commands.entrance import received_permission_amount from thx_bot.commands.entrance import regular_choice_permissions from thx_bot.commands.entrance import show_entrance_permision_for_channel from thx_bot.commands.entrance import toggle_users_with_rewards from thx_bot.commands.pool_rewards import done_rewards from thx_bot.commands.pool_rewards import pool_show_rewards_command from thx_bot.commands.pool_rewards import received_information_reward from thx_bot.commands.pool_rewards import regular_choice_reward from thx_bot.commands.pool_rewards import rewards_entrypoint from thx_bot.commands.register_channel import check_connection_channel from thx_bot.commands.register_channel import done_channel from thx_bot.commands.register_channel import received_information_channel from thx_bot.commands.register_channel import regular_choice_channel from thx_bot.commands.register_channel import start_setting_channel from thx_bot.commands.update_wallet import done_wallet_update from thx_bot.commands.update_wallet import received_information_wallet_update from thx_bot.commands.update_wallet import regular_choice_wallet_update from thx_bot.commands.update_wallet import start_updating_wallet register_channel_conversation = ConversationHandler( entry_points=[CommandHandler('register_channel', start_setting_channel)], states={ CHOOSING: [ MessageHandler( Filters.regex('^(Client id|Client secret|Pool address)$'), regular_choice_channel ), ], TYPING_REPLY: [ MessageHandler( Filters.text & ~(Filters.command | Filters.regex('^Done$')), received_information_channel, ) ], }, fallbacks=[ MessageHandler(Filters.regex('^Done$'), done_channel), MessageHandler(Filters.regex('^Test Connection$'), check_connection_channel), ], name="register_channel", persistent=False, ) create_wallet_conversation = ConversationHandler( entry_points=[CommandHandler('create_wallet', start_creating_wallet)], states={ CHOOSING_SIGNUP: [ MessageHandler( Filters.regex('^(Email|Password)$'), regular_choice_signup ), ], TYPING_REPLY_SIGNUP: [ MessageHandler( Filters.text & ~(Filters.command | Filters.regex('^Done$')), received_information_signup, 
) ], }, fallbacks=[ MessageHandler(Filters.regex('^Done$'), done_signup), ], name="create_wallet", persistent=False, ) update_wallet_conversation = ConversationHandler( entry_points=[CommandHandler('update_wallet', start_updating_wallet)], states={ CHOOSING_WALLET_UPDATE: [ MessageHandler( Filters.regex('^Wallet Update$'), regular_choice_wallet_update ), ], TYPING_REPLY_WALLET_UPDATE: [ MessageHandler( Filters.text & ~(Filters.command | Filters.regex('^Done$')), received_information_wallet_update, ) ], }, fallbacks=[ MessageHandler(Filters.regex('^Done$'), done_wallet_update), ], name="update_wallet", persistent=False, ) rewards_conversation = ConversationHandler( entry_points=[CommandHandler('rewards', rewards_entrypoint)], states={ CHOOSING_REWARDS: [ MessageHandler( Filters.regex('^Set Reward$'), regular_choice_reward ), ], TYPING_REWARD_REPLY: [ MessageHandler( Filters.text & ~(Filters.command | Filters.regex('^Done$')), received_information_reward, ) ], }, fallbacks=[ MessageHandler(Filters.regex('^Done$'), done_rewards), MessageHandler(Filters.regex('^Show rewards$'), pool_show_rewards_command), ], name="rewards", persistent=False, ) entrance_tokens_conversation = ConversationHandler( entry_points=[CommandHandler('entrance', permissions_entrypoint)], states={ CHOOSING_TOKENS: [ MessageHandler( Filters.regex('^Set entrance amount$'), regular_choice_permissions ), ], TYPING_TOKENS_REPLY: [ MessageHandler( Filters.text & ~(Filters.command | Filters.regex('^Done$')), received_permission_amount, ) ], }, fallbacks=[ MessageHandler(Filters.regex('^Done$'), done_permission), MessageHandler( Filters.regex('^Show entrance configuration$'), show_entrance_permision_for_channel), MessageHandler( Filters.regex('^Disable entrance checks$'), disable_entrance_checks), MessageHandler( Filters.regex('^Toggle only users with rewards$'), toggle_users_with_rewards), ], name="entrance", persistent=False, ) add_member_conversation = ConversationHandler( entry_points=[CommandHandler('add_me_to_pool', start_adding_member)], states={ CHOOSING_ADD_MEMBER: [ MessageHandler( Filters.regex('^Add your wallet$'), regular_choice_member_add ), ], TYPING_REPLY_MEMBER: [ MessageHandler( Filters.text & ~(Filters.command | Filters.regex('^Done$')), received_information_member_add, ) ], }, fallbacks=[ MessageHandler(Filters.regex('^Done$'), done_member_add), ], name="add_member", persistent=False, )
true
true
f72474b80565c33b2edda35cd6580c085dbf16da
17,832
py
Python
pybind/nos/v6_0_2c/rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/peer_grps/neighbor_peer_grp/update_source/__init__.py
shivharis/pybind
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
[ "Apache-2.0" ]
null
null
null
pybind/nos/v6_0_2c/rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/peer_grps/neighbor_peer_grp/update_source/__init__.py
shivharis/pybind
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
[ "Apache-2.0" ]
null
null
null
pybind/nos/v6_0_2c/rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/peer_grps/neighbor_peer_grp/update_source/__init__.py
shivharis/pybind
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
[ "Apache-2.0" ]
1
2021-11-05T22:15:42.000Z
2021-11-05T22:15:42.000Z
from operator import attrgetter import pyangbind.lib.xpathhelper as xpathhelper from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType from pyangbind.lib.base import PybindBase from decimal import Decimal from bitarray import bitarray import __builtin__ import ethernet_interface class update_source(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module brocade-rbridge - based on the path /rbridge-id/router/router-bgp/router-bgp-attributes/neighbor/peer-grps/neighbor-peer-grp/update-source. Each member element of the container is represented as a class variable - with a specific YANG type. """ __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__sip_ipv4_address','__ethernet_interface','__loopback','__ve_interface',) _yang_name = 'update-source' _rest_name = 'update-source' _pybind_generated_by = 'container' def __init__(self, *args, **kwargs): path_helper_ = kwargs.pop("path_helper", None) if path_helper_ is False: self._path_helper = False elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper): self._path_helper = path_helper_ elif hasattr(self, "_parent"): path_helper_ = getattr(self._parent, "_path_helper", False) self._path_helper = path_helper_ else: self._path_helper = False extmethods = kwargs.pop("extmethods", None) if extmethods is False: self._extmethods = False elif extmethods is not None and isinstance(extmethods, dict): self._extmethods = extmethods elif hasattr(self, "_parent"): extmethods = getattr(self._parent, "_extmethods", None) self._extmethods = extmethods else: self._extmethods = False self.__ve_interface = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="ve-interface", rest_name="ve-interface", parent=self, choice=(u'ch-update-source', u'ca-ve'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='interface:ve-type', is_config=True) self.__sip_ipv4_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True) self.__loopback = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="loopback", rest_name="loopback", parent=self, choice=(u'ch-update-source', u'ca-loopback'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Loopback Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='loopback-interface', is_config=True) 
self.__ethernet_interface = YANGDynClass(base=ethernet_interface.ethernet_interface, is_container='container', presence=False, yang_name="ethernet-interface", rest_name="", parent=self, choice=(u'ch-update-source', u'ca-eth'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return [u'rbridge-id', u'router', u'router-bgp', u'router-bgp-attributes', u'neighbor', u'peer-grps', u'neighbor-peer-grp', u'update-source'] def _rest_path(self): if hasattr(self, "_parent"): if self._rest_name: return self._parent._rest_path()+[self._rest_name] else: return self._parent._rest_path() else: return [u'rbridge-id', u'router', u'bgp', u'neighbor', u'neighbor-peer-grp', u'update-source'] def _get_sip_ipv4_address(self): """ Getter method for sip_ipv4_address, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/peer_grps/neighbor_peer_grp/update_source/sip_ipv4_address (sip-ipv4-address) """ return self.__sip_ipv4_address def _set_sip_ipv4_address(self, v, load=False): """ Setter method for sip_ipv4_address, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/peer_grps/neighbor_peer_grp/update_source/sip_ipv4_address (sip-ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_sip_ipv4_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sip_ipv4_address() directly. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """sip_ipv4_address must be of a type compatible with sip-ipv4-address""", 'defined-type': "brocade-bgp:sip-ipv4-address", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True)""", }) self.__sip_ipv4_address = t if hasattr(self, '_set'): self._set() def _unset_sip_ipv4_address(self): self.__sip_ipv4_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True) def _get_ethernet_interface(self): """ Getter method for ethernet_interface, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/peer_grps/neighbor_peer_grp/update_source/ethernet_interface (container) """ return self.__ethernet_interface def _set_ethernet_interface(self, v, load=False): """ Setter method for ethernet_interface, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/peer_grps/neighbor_peer_grp/update_source/ethernet_interface (container) If this variable is read-only (config: false) in the source YANG file, then _set_ethernet_interface is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ethernet_interface() directly. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=ethernet_interface.ethernet_interface, is_container='container', presence=False, yang_name="ethernet-interface", rest_name="", parent=self, choice=(u'ch-update-source', u'ca-eth'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ethernet_interface must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=ethernet_interface.ethernet_interface, is_container='container', presence=False, yang_name="ethernet-interface", rest_name="", parent=self, choice=(u'ch-update-source', u'ca-eth'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""", }) self.__ethernet_interface = t if hasattr(self, '_set'): self._set() def _unset_ethernet_interface(self): self.__ethernet_interface = YANGDynClass(base=ethernet_interface.ethernet_interface, is_container='container', presence=False, yang_name="ethernet-interface", rest_name="", parent=self, choice=(u'ch-update-source', u'ca-eth'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True) def _get_loopback(self): """ Getter method for loopback, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/peer_grps/neighbor_peer_grp/update_source/loopback (loopback-interface) """ return self.__loopback def _set_loopback(self, v, load=False): """ Setter method for loopback, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/peer_grps/neighbor_peer_grp/update_source/loopback (loopback-interface) If this variable is read-only (config: false) in the source YANG file, then _set_loopback is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_loopback() directly. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="loopback", rest_name="loopback", parent=self, choice=(u'ch-update-source', u'ca-loopback'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Loopback Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='loopback-interface', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """loopback must be of a type compatible with loopback-interface""", 'defined-type': "brocade-bgp:loopback-interface", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="loopback", rest_name="loopback", parent=self, choice=(u'ch-update-source', u'ca-loopback'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Loopback Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='loopback-interface', is_config=True)""", }) self.__loopback = t if hasattr(self, '_set'): self._set() def _unset_loopback(self): self.__loopback = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="loopback", rest_name="loopback", parent=self, choice=(u'ch-update-source', u'ca-loopback'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Loopback Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='loopback-interface', is_config=True) def _get_ve_interface(self): """ Getter method for ve_interface, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/peer_grps/neighbor_peer_grp/update_source/ve_interface (interface:ve-type) """ return self.__ve_interface def _set_ve_interface(self, v, load=False): """ Setter method for ve_interface, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/peer_grps/neighbor_peer_grp/update_source/ve_interface (interface:ve-type) If this variable is read-only (config: false) in the source YANG file, then _set_ve_interface is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ve_interface() directly. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="ve-interface", rest_name="ve-interface", parent=self, choice=(u'ch-update-source', u'ca-ve'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='interface:ve-type', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ve_interface must be of a type compatible with interface:ve-type""", 'defined-type': "interface:ve-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="ve-interface", rest_name="ve-interface", parent=self, choice=(u'ch-update-source', u'ca-ve'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='interface:ve-type', is_config=True)""", }) self.__ve_interface = t if hasattr(self, '_set'): self._set() def _unset_ve_interface(self): self.__ve_interface = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="ve-interface", rest_name="ve-interface", parent=self, choice=(u'ch-update-source', u'ca-ve'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='interface:ve-type', is_config=True) sip_ipv4_address = __builtin__.property(_get_sip_ipv4_address, _set_sip_ipv4_address) ethernet_interface = __builtin__.property(_get_ethernet_interface, _set_ethernet_interface) loopback = __builtin__.property(_get_loopback, _set_loopback) ve_interface = __builtin__.property(_get_ve_interface, _set_ve_interface) __choices__ = {u'ch-update-source': {u'ca-eth': [u'ethernet_interface'], u'ca-ve': [u've_interface'], u'ca-ipv4': [u'sip_ipv4_address'], u'ca-loopback': [u'loopback']}} _pyangbind_elements = {'sip_ipv4_address': sip_ipv4_address, 'ethernet_interface': ethernet_interface, 'loopback': loopback, 've_interface': ve_interface, }
77.868996
648
0.730316
from operator import attrgetter import pyangbind.lib.xpathhelper as xpathhelper from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType from pyangbind.lib.base import PybindBase from decimal import Decimal from bitarray import bitarray import __builtin__ import ethernet_interface class update_source(PybindBase): __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__sip_ipv4_address','__ethernet_interface','__loopback','__ve_interface',) _yang_name = 'update-source' _rest_name = 'update-source' _pybind_generated_by = 'container' def __init__(self, *args, **kwargs): path_helper_ = kwargs.pop("path_helper", None) if path_helper_ is False: self._path_helper = False elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper): self._path_helper = path_helper_ elif hasattr(self, "_parent"): path_helper_ = getattr(self._parent, "_path_helper", False) self._path_helper = path_helper_ else: self._path_helper = False extmethods = kwargs.pop("extmethods", None) if extmethods is False: self._extmethods = False elif extmethods is not None and isinstance(extmethods, dict): self._extmethods = extmethods elif hasattr(self, "_parent"): extmethods = getattr(self._parent, "_extmethods", None) self._extmethods = extmethods else: self._extmethods = False self.__ve_interface = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="ve-interface", rest_name="ve-interface", parent=self, choice=(u'ch-update-source', u'ca-ve'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='interface:ve-type', is_config=True) self.__sip_ipv4_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True) self.__loopback = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="loopback", rest_name="loopback", parent=self, choice=(u'ch-update-source', u'ca-loopback'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Loopback Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='loopback-interface', is_config=True) self.__ethernet_interface = YANGDynClass(base=ethernet_interface.ethernet_interface, is_container='container', presence=False, yang_name="ethernet-interface", rest_name="", parent=self, choice=(u'ch-update-source', u'ca-eth'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': 
{u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return [u'rbridge-id', u'router', u'router-bgp', u'router-bgp-attributes', u'neighbor', u'peer-grps', u'neighbor-peer-grp', u'update-source'] def _rest_path(self): if hasattr(self, "_parent"): if self._rest_name: return self._parent._rest_path()+[self._rest_name] else: return self._parent._rest_path() else: return [u'rbridge-id', u'router', u'bgp', u'neighbor', u'neighbor-peer-grp', u'update-source'] def _get_sip_ipv4_address(self): return self.__sip_ipv4_address def _set_sip_ipv4_address(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """sip_ipv4_address must be of a type compatible with sip-ipv4-address""", 'defined-type': "brocade-bgp:sip-ipv4-address", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True)""", }) self.__sip_ipv4_address = t if hasattr(self, '_set'): self._set() def _unset_sip_ipv4_address(self): self.__sip_ipv4_address = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="sip-ipv4-address", rest_name="sip-ipv4-address", parent=self, choice=(u'ch-update-source', u'ca-ipv4'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', 
defining_module='brocade-bgp', yang_type='sip-ipv4-address', is_config=True) def _get_ethernet_interface(self): return self.__ethernet_interface def _set_ethernet_interface(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=ethernet_interface.ethernet_interface, is_container='container', presence=False, yang_name="ethernet-interface", rest_name="", parent=self, choice=(u'ch-update-source', u'ca-eth'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ethernet_interface must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=ethernet_interface.ethernet_interface, is_container='container', presence=False, yang_name="ethernet-interface", rest_name="", parent=self, choice=(u'ch-update-source', u'ca-eth'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""", }) self.__ethernet_interface = t if hasattr(self, '_set'): self._set() def _unset_ethernet_interface(self): self.__ethernet_interface = YANGDynClass(base=ethernet_interface.ethernet_interface, is_container='container', presence=False, yang_name="ethernet-interface", rest_name="", parent=self, choice=(u'ch-update-source', u'ca-eth'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-sequence-commands': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True) def _get_loopback(self): return self.__loopback def _set_loopback(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="loopback", rest_name="loopback", parent=self, choice=(u'ch-update-source', u'ca-loopback'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Loopback Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='loopback-interface', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """loopback must be of a type compatible with loopback-interface""", 'defined-type': "brocade-bgp:loopback-interface", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="loopback", rest_name="loopback", parent=self, choice=(u'ch-update-source', u'ca-loopback'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Loopback Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='loopback-interface', is_config=True)""", }) self.__loopback = t if hasattr(self, '_set'): self._set() def _unset_loopback(self): self.__loopback 
= YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..63']}), is_leaf=True, yang_name="loopback", rest_name="loopback", parent=self, choice=(u'ch-update-source', u'ca-loopback'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Loopback Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='loopback-interface', is_config=True) def _get_ve_interface(self): return self.__ve_interface def _set_ve_interface(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="ve-interface", rest_name="ve-interface", parent=self, choice=(u'ch-update-source', u'ca-ve'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='interface:ve-type', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ve_interface must be of a type compatible with interface:ve-type""", 'defined-type': "interface:ve-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="ve-interface", rest_name="ve-interface", parent=self, choice=(u'ch-update-source', u'ca-ve'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='interface:ve-type', is_config=True)""", }) self.__ve_interface = t if hasattr(self, '_set'): self._set() def _unset_ve_interface(self): self.__ve_interface = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="ve-interface", rest_name="ve-interface", parent=self, choice=(u'ch-update-source', u'ca-ve'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Virtual Interface'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='interface:ve-type', is_config=True) sip_ipv4_address = __builtin__.property(_get_sip_ipv4_address, _set_sip_ipv4_address) ethernet_interface = __builtin__.property(_get_ethernet_interface, _set_ethernet_interface) loopback = __builtin__.property(_get_loopback, _set_loopback) ve_interface = __builtin__.property(_get_ve_interface, _set_ve_interface) __choices__ = {u'ch-update-source': {u'ca-eth': [u'ethernet_interface'], u'ca-ve': [u've_interface'], u'ca-ipv4': [u'sip_ipv4_address'], u'ca-loopback': [u'loopback']}} _pyangbind_elements = {'sip_ipv4_address': sip_ipv4_address, 'ethernet_interface': ethernet_interface, 'loopback': loopback, 've_interface': ve_interface, }
true
true
f72475dc8fb021a442a84a5e411f00ab6c251ab0
1,015
py
Python
mud.py
nparry0/mud
ec520795e2206d5e885c343c13e3ead5d2787cd5
[ "MIT" ]
2
2018-03-15T06:00:18.000Z
2018-03-22T03:19:38.000Z
mud.py
nparry0/mud
ec520795e2206d5e885c343c13e3ead5d2787cd5
[ "MIT" ]
null
null
null
mud.py
nparry0/mud
ec520795e2206d5e885c343c13e3ead5d2787cd5
[ "MIT" ]
null
null
null
import gevent.server from mud_telnet_handler import MudTelnetHandler from game_server import GameServer import argparse import logging # Set up logging log = logging.getLogger() handler = logging.StreamHandler() formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s') handler.setFormatter(formatter) log.addHandler(handler) log.setLevel(logging.DEBUG) parser = argparse.ArgumentParser() parser.add_argument("--playerdir", type=str, default="./players", help="Directory where all player data is kept.") parser.add_argument("--map", type=str, default="mud.map", help="Map file") parser.add_argument("--port", type=int, default=3000, help="Listening port.") args = parser.parse_args() log.info("Mud starting. Params: %s" % args) # Set up some class vars of MudTelnetHandler MudTelnetHandler.player_dir = args.playerdir + "/" MudTelnetHandler.game_server = GameServer(args.map) server = gevent.server.StreamServer(("", args.port), MudTelnetHandler.streamserver_handle) server.serve_forever()
32.741935
114
0.770443
import gevent.server from mud_telnet_handler import MudTelnetHandler from game_server import GameServer import argparse import logging log = logging.getLogger() handler = logging.StreamHandler() formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s') handler.setFormatter(formatter) log.addHandler(handler) log.setLevel(logging.DEBUG) parser = argparse.ArgumentParser() parser.add_argument("--playerdir", type=str, default="./players", help="Directory where all player data is kept.") parser.add_argument("--map", type=str, default="mud.map", help="Map file") parser.add_argument("--port", type=int, default=3000, help="Listening port.") args = parser.parse_args() log.info("Mud starting. Params: %s" % args) MudTelnetHandler.player_dir = args.playerdir + "/" MudTelnetHandler.game_server = GameServer(args.map) server = gevent.server.StreamServer(("", args.port), MudTelnetHandler.streamserver_handle) server.serve_forever()
true
true
f724762d197c2f0292cdab0ed9ca4350acb293d2
37,046
py
Python
allauth/account/tests.py
pkyad/django-allauth
1ffd9c2c4a121b9cdcb5814209933ce74d4303c5
[ "MIT" ]
null
null
null
allauth/account/tests.py
pkyad/django-allauth
1ffd9c2c4a121b9cdcb5814209933ce74d4303c5
[ "MIT" ]
null
null
null
allauth/account/tests.py
pkyad/django-allauth
1ffd9c2c4a121b9cdcb5814209933ce74d4303c5
[ "MIT" ]
null
null
null
from __future__ import absolute_import import json from datetime import timedelta import django from django.utils.timezone import now from django.test.utils import override_settings from django.conf import settings from django.core.urlresolvers import reverse from django.test.client import Client from django.core import mail from django.test.client import RequestFactory from django.contrib.auth.models import AnonymousUser, AbstractUser from django.db import models import unittest from allauth.tests import TestCase, patch from allauth.account.forms import BaseSignupForm from allauth.account.models import ( EmailAddress, EmailConfirmation, EmailConfirmationHMAC) from allauth.utils import ( get_current_site, get_user_model, get_username_max_length) from . import app_settings from .auth_backends import AuthenticationBackend from .adapter import get_adapter from .utils import url_str_to_user_pk, user_pk_to_url_str import uuid @override_settings( ACCOUNT_DEFAULT_HTTP_PROTOCOL='https', ACCOUNT_EMAIL_VERIFICATION=app_settings.EmailVerificationMethod.MANDATORY, ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME, ACCOUNT_SIGNUP_FORM_CLASS=None, ACCOUNT_EMAIL_SUBJECT_PREFIX=None, LOGIN_REDIRECT_URL='/accounts/profile/', ACCOUNT_ADAPTER='allauth.account.adapter.DefaultAccountAdapter', ACCOUNT_USERNAME_REQUIRED=True) class AccountTests(TestCase): def setUp(self): if 'allauth.socialaccount' in settings.INSTALLED_APPS: # Otherwise ImproperlyConfigured exceptions may occur from ..socialaccount.models import SocialApp sa = SocialApp.objects.create(name='testfb', provider='facebook') sa.sites.add(get_current_site()) @override_settings( ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod .USERNAME_EMAIL) def test_username_containing_at(self): user = get_user_model().objects.create(username='@raymond.penners') user.set_password('psst') user.save() EmailAddress.objects.create(user=user, email='raymond.penners@gmail.com', primary=True, verified=True) resp = self.client.post(reverse('account_login'), {'login': '@raymond.penners', 'password': 'psst'}) self.assertRedirects(resp, 'http://testserver'+settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False) def test_signup_same_email_verified_externally(self): user = self._test_signup_email_verified_externally('john@doe.com', 'john@doe.com') self.assertEqual(EmailAddress.objects.filter(user=user).count(), 1) EmailAddress.objects.get(verified=True, email='john@doe.com', user=user, primary=True) def test_signup_other_email_verified_externally(self): """ John is invited on john@work.com, but signs up via john@home.com. E-mail verification is by-passed, their home e-mail address is used as a secondary. 
""" user = self._test_signup_email_verified_externally('john@home.com', 'john@work.com') self.assertEqual(EmailAddress.objects.filter(user=user).count(), 2) EmailAddress.objects.get(verified=False, email='john@home.com', user=user, primary=False) EmailAddress.objects.get(verified=True, email='john@work.com', user=user, primary=True) def _test_signup_email_verified_externally(self, signup_email, verified_email): username = 'johndoe' request = RequestFactory().post(reverse('account_signup'), {'username': username, 'email': signup_email, 'password1': 'johndoe', 'password2': 'johndoe'}) # Fake stash_verified_email from django.contrib.messages.middleware import MessageMiddleware from django.contrib.sessions.middleware import SessionMiddleware SessionMiddleware().process_request(request) MessageMiddleware().process_request(request) request.user = AnonymousUser() request.session['account_verified_email'] = verified_email from .views import signup resp = signup(request) self.assertEqual(resp.status_code, 302) self.assertEqual(resp['location'], get_adapter().get_login_redirect_url(request)) self.assertEqual(len(mail.outbox), 0) return get_user_model().objects.get(username=username) @override_settings( ACCOUNT_USERNAME_REQUIRED=True, ACCOUNT_SIGNUP_EMAIL_ENTER_TWICE=True) def test_signup_email_twice(self): request = RequestFactory().post(reverse('account_signup'), {'username': 'johndoe', 'email1': 'john@work.com', 'email2': 'john@work.com', 'password1': 'johndoe', 'password2': 'johndoe'}) from django.contrib.messages.middleware import MessageMiddleware from django.contrib.sessions.middleware import SessionMiddleware SessionMiddleware().process_request(request) MessageMiddleware().process_request(request) request.user = AnonymousUser() from .views import signup signup(request) user = get_user_model().objects.get(username='johndoe') self.assertEqual(user.email, 'john@work.com') def _create_user(self): user = get_user_model().objects.create(username='john', is_active=True) user.set_password('doe') user.save() return user def _create_user_and_login(self): user = self._create_user() self.client.login(username='john', password='doe') return user def test_redirect_when_authenticated(self): self._create_user_and_login() c = self.client resp = c.get(reverse('account_login')) self.assertRedirects(resp, 'http://testserver/accounts/profile/', fetch_redirect_response=False) def test_password_reset_get(self): resp = self.client.get(reverse('account_reset_password')) self.assertTemplateUsed(resp, 'account/password_reset.html') def test_password_set_redirect(self): resp = self._password_set_or_reset_redirect('account_set_password', True) self.assertEqual(resp.status_code, 302) def test_password_reset_no_redirect(self): resp = self._password_set_or_reset_redirect('account_change_password', True) self.assertEqual(resp.status_code, 200) def test_password_set_no_redirect(self): resp = self._password_set_or_reset_redirect('account_set_password', False) self.assertEqual(resp.status_code, 200) def test_password_reset_redirect(self): resp = self._password_set_or_reset_redirect('account_change_password', False) self.assertEqual(resp.status_code, 302) def _password_set_or_reset_redirect(self, urlname, usable_password): user = self._create_user_and_login() c = self.client if not usable_password: user.set_unusable_password() user.save() resp = c.get(reverse(urlname)) return resp def test_password_forgotten_username_hint(self): user = self._request_new_password() body = mail.outbox[0].body assert user.username in body 
@override_settings( ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_password_forgotten_no_username_hint(self): user = self._request_new_password() body = mail.outbox[0].body assert user.username not in body def _request_new_password(self): user = get_user_model().objects.create( username='john', email='john@doe.org', is_active=True) user.set_password('doe') user.save() self.client.post( reverse('account_reset_password'), data={'email': 'john@doe.org'}) self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].to, ['john@doe.org']) return user def test_password_reset_flow(self): """ Tests the password reset flow: requesting a new password, receiving the reset link via email and finally resetting the password to a new value. """ # Request new password user = self._request_new_password() body = mail.outbox[0].body self.assertGreater(body.find('https://'), 0) # Extract URL for `password_reset_from_key` view and access it url = body[body.find('/password/reset/'):].split()[0] resp = self.client.get(url) self.assertTemplateUsed( resp, 'account/password_reset_from_key.%s' % app_settings.TEMPLATE_EXTENSION) self.assertFalse('token_fail' in resp.context_data) # Reset the password resp = self.client.post(url, {'password1': 'newpass123', 'password2': 'newpass123'}) self.assertRedirects(resp, reverse('account_reset_password_from_key_done')) # Check the new password is in effect user = get_user_model().objects.get(pk=user.pk) self.assertTrue(user.check_password('newpass123')) # Trying to reset the password against the same URL (or any other # invalid/obsolete URL) returns a bad token response resp = self.client.post(url, {'password1': 'newpass123', 'password2': 'newpass123'}) self.assertTemplateUsed( resp, 'account/password_reset_from_key.%s' % app_settings.TEMPLATE_EXTENSION) self.assertTrue(resp.context_data['token_fail']) # Same should happen when accessing the page directly response = self.client.get(url) self.assertTemplateUsed( response, 'account/password_reset_from_key.%s' % app_settings.TEMPLATE_EXTENSION) self.assertTrue(response.context_data['token_fail']) # When in XHR views, it should respond with a 400 bad request # code, and the response body should contain the JSON-encoded # error from the adapter response = self.client.post(url, {'password1': 'newpass123', 'password2': 'newpass123'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') self.assertEqual(response.status_code, 400) data = json.loads(response.content.decode('utf8')) self.assertTrue('form_errors' in data) self.assertTrue('__all__' in data['form_errors']) @override_settings(ACCOUNT_LOGIN_ON_PASSWORD_RESET=True) def test_password_reset_ACCOUNT_LOGIN_ON_PASSWORD_RESET(self): user = self._request_new_password() body = mail.outbox[0].body url = body[body.find('/password/reset/'):].split()[0] resp = self.client.post( url, {'password1': 'newpass123', 'password2': 'newpass123'}) self.assertTrue(user.is_authenticated()) # EmailVerificationMethod.MANDATORY sends us to the confirm-email page self.assertRedirects(resp, '/confirm-email/') @override_settings(ACCOUNT_EMAIL_CONFIRMATION_HMAC=False) def test_email_verification_mandatory(self): c = Client() # Signup resp = c.post(reverse('account_signup'), {'username': 'johndoe', 'email': 'john@doe.com', 'password1': 'johndoe', 'password2': 'johndoe'}, follow=True) self.assertEqual(resp.status_code, 200) self.assertEqual(mail.outbox[0].to, ['john@doe.com']) self.assertGreater(mail.outbox[0].body.find('https://'), 0) self.assertEqual(len(mail.outbox), 1) 
self.assertTemplateUsed( resp, 'account/verification_sent.%s' % app_settings.TEMPLATE_EXTENSION) # Attempt to login, unverified for attempt in [1, 2]: resp = c.post(reverse('account_login'), {'login': 'johndoe', 'password': 'johndoe'}, follow=True) # is_active is controlled by the admin to manually disable # users. I don't want this flag to flip automatically whenever # users verify their email adresses. self.assertTrue(get_user_model().objects.filter( username='johndoe', is_active=True).exists()) self.assertTemplateUsed( resp, 'account/verification_sent.' + app_settings.TEMPLATE_EXTENSION) # Attempt 1: no mail is sent due to cool-down , # but there was already a mail in the outbox. self.assertEqual(len(mail.outbox), attempt) self.assertEqual( EmailConfirmation.objects.filter( email_address__email='john@doe.com').count(), attempt) # Wait for cooldown EmailConfirmation.objects.update(sent=now() - timedelta(days=1)) # Verify, and re-attempt to login. confirmation = EmailConfirmation \ .objects \ .filter(email_address__user__username='johndoe')[:1] \ .get() resp = c.get(reverse('account_confirm_email', args=[confirmation.key])) self.assertTemplateUsed( resp, 'account/email_confirm.%s' % app_settings.TEMPLATE_EXTENSION) c.post(reverse('account_confirm_email', args=[confirmation.key])) resp = c.post(reverse('account_login'), {'login': 'johndoe', 'password': 'johndoe'}) self.assertRedirects(resp, 'http://testserver'+settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False) def test_email_escaping(self): site = get_current_site() site.name = '<enc&"test>' site.save() u = get_user_model().objects.create( username='test', email='foo@bar.com') request = RequestFactory().get('/') EmailAddress.objects.add_email(request, u, u.email, confirm=True) self.assertTrue(mail.outbox[0].subject[1:].startswith(site.name)) @override_settings( ACCOUNT_EMAIL_VERIFICATION=app_settings.EmailVerificationMethod .OPTIONAL) def test_login_unverified_account_optional(self): """Tests login behavior when email verification is optional.""" user = get_user_model().objects.create(username='john') user.set_password('doe') user.save() EmailAddress.objects.create(user=user, email='john@example.com', primary=True, verified=False) resp = self.client.post(reverse('account_login'), {'login': 'john', 'password': 'doe'}) self.assertRedirects(resp, 'http://testserver'+settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False) @override_settings( ACCOUNT_EMAIL_VERIFICATION=app_settings.EmailVerificationMethod .OPTIONAL, ACCOUNT_LOGIN_ATTEMPTS_LIMIT=3) def test_login_failed_attempts_exceeded(self): user = get_user_model().objects.create(username='john') user.set_password('doe') user.save() EmailAddress.objects.create(user=user, email='john@example.com', primary=True, verified=False) for i in range(5): is_valid_attempt = (i == 4) is_locked = (i >= 3) resp = self.client.post( reverse('account_login'), {'login': 'john', 'password': ( 'doe' if is_valid_attempt else 'wrong')}) self.assertFormError( resp, 'form', None, 'Too many failed login attempts. Try again later.' 
if is_locked else 'The username and/or password you specified are not correct.') def test_login_unverified_account_mandatory(self): """Tests login behavior when email verification is mandatory.""" user = get_user_model().objects.create(username='john') user.set_password('doe') user.save() EmailAddress.objects.create(user=user, email='john@example.com', primary=True, verified=False) resp = self.client.post(reverse('account_login'), {'login': 'john', 'password': 'doe'}) self.assertRedirects(resp, reverse('account_email_verification_sent')) def test_login_inactive_account(self): """ Tests login behavior with inactive accounts. Inactive user accounts should be prevented from performing any actions, regardless of their verified state. """ # Inactive and verified user account user = get_user_model().objects.create(username='john', is_active=False) user.set_password('doe') user.save() EmailAddress.objects.create(user=user, email='john@example.com', primary=True, verified=True) resp = self.client.post(reverse('account_login'), {'login': 'john', 'password': 'doe'}) self.assertRedirects(resp, reverse('account_inactive')) # Inactive and unverified user account user = get_user_model().objects.create(username='doe', is_active=False) user.set_password('john') user.save() EmailAddress.objects.create(user=user, email='doe@example.com', primary=True, verified=False) resp = self.client.post(reverse('account_login'), {'login': 'doe', 'password': 'john'}) self.assertRedirects(resp, reverse('account_inactive')) def test_ajax_password_reset(self): get_user_model().objects.create( username='john', email='john@doe.org', is_active=True) resp = self.client.post( reverse('account_reset_password'), data={'email': 'john@doe.org'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].to, ['john@doe.org']) self.assertEqual(resp['content-type'], 'application/json') def test_ajax_login_fail(self): resp = self.client.post(reverse('account_login'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') self.assertEqual(resp.status_code, 400) json.loads(resp.content.decode('utf8')) # TODO: Actually test something @override_settings( ACCOUNT_EMAIL_VERIFICATION=app_settings.EmailVerificationMethod .OPTIONAL) def test_ajax_login_success(self): user = get_user_model().objects.create(username='john', is_active=True) user.set_password('doe') user.save() resp = self.client.post(reverse('account_login'), {'login': 'john', 'password': 'doe'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') self.assertEqual(resp.status_code, 200) data = json.loads(resp.content.decode('utf8')) self.assertEqual(data['location'], '/accounts/profile/') def test_email_view(self): self._create_user_and_login() self.client.get(reverse('account_email')) # TODO: Actually test something @override_settings(ACCOUNT_LOGOUT_ON_GET=True) def test_logout_view_on_get(self): c, resp = self._logout_view('get') self.assertTemplateUsed(resp, 'account/messages/logged_out.txt') @override_settings(ACCOUNT_LOGOUT_ON_GET=False) def test_logout_view_on_post(self): c, resp = self._logout_view('get') self.assertTemplateUsed( resp, 'account/logout.%s' % app_settings.TEMPLATE_EXTENSION) resp = c.post(reverse('account_logout')) self.assertTemplateUsed(resp, 'account/messages/logged_out.txt') def _logout_view(self, method): c = Client() user = get_user_model().objects.create(username='john', is_active=True) user.set_password('doe') user.save() c = Client() c.login(username='john', password='doe') return c, getattr(c, 
method)(reverse('account_logout')) @override_settings(ACCOUNT_EMAIL_VERIFICATION=app_settings .EmailVerificationMethod.OPTIONAL) def test_optional_email_verification(self): c = Client() # Signup c.get(reverse('account_signup')) resp = c.post(reverse('account_signup'), {'username': 'johndoe', 'email': 'john@doe.com', 'password1': 'johndoe', 'password2': 'johndoe'}) # Logged in self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False) self.assertEqual(mail.outbox[0].to, ['john@doe.com']) self.assertEqual(len(mail.outbox), 1) # Logout & login again c.logout() # Wait for cooldown EmailConfirmation.objects.update(sent=now() - timedelta(days=1)) # Signup resp = c.post(reverse('account_login'), {'login': 'johndoe', 'password': 'johndoe'}) self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False) self.assertEqual(mail.outbox[0].to, ['john@doe.com']) # There was an issue that we sent out email confirmation mails # on each login in case of optional verification. Make sure # this is not the case: self.assertEqual(len(mail.outbox), 1) @override_settings(ACCOUNT_AUTHENTICATED_LOGIN_REDIRECTS=False) def test_account_authenticated_login_redirects_is_false(self): self._create_user_and_login() resp = self.client.get(reverse('account_login')) self.assertEqual(resp.status_code, 200) @override_settings(AUTH_PASSWORD_VALIDATORS=[{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': { 'min_length': 9, } }]) def test_django_password_validation(self): if django.VERSION < (1, 9, ): return resp = self.client.post( reverse('account_signup'), {'username': 'johndoe', 'email': 'john@doe.com', 'password1': 'johndoe', 'password2': 'johndoe'}) self.assertFormError(resp, 'form', None, []) self.assertFormError( resp, 'form', 'password1', ['This password is too short.' 
' It must contain at least 9 characters.']) @override_settings(ACCOUNT_EMAIL_CONFIRMATION_HMAC=True) def test_email_confirmation_hmac_falls_back(self): user = self._create_user() email = EmailAddress.objects.create( user=user, email='a@b.com', verified=False, primary=True) confirmation = EmailConfirmation.create(email) confirmation.sent = now() confirmation.save() self.client.post( reverse('account_confirm_email', args=[confirmation.key])) email = EmailAddress.objects.get(pk=email.pk) self.assertTrue(email.verified) @override_settings(ACCOUNT_EMAIL_CONFIRMATION_HMAC=True) def test_email_confirmation_hmac(self): user = self._create_user() email = EmailAddress.objects.create( user=user, email='a@b.com', verified=False, primary=True) confirmation = EmailConfirmationHMAC(email) confirmation.send() self.assertEqual(len(mail.outbox), 1) self.client.post( reverse('account_confirm_email', args=[confirmation.key])) email = EmailAddress.objects.get(pk=email.pk) self.assertTrue(email.verified) @override_settings( ACCOUNT_EMAIL_CONFIRMATION_HMAC=True, ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS=0) def test_email_confirmation_hmac_timeout(self): user = self._create_user() email = EmailAddress.objects.create( user=user, email='a@b.com', verified=False, primary=True) confirmation = EmailConfirmationHMAC(email) confirmation.send() self.assertEqual(len(mail.outbox), 1) self.client.post( reverse('account_confirm_email', args=[confirmation.key])) email = EmailAddress.objects.get(pk=email.pk) self.assertFalse(email.verified) class EmailFormTests(TestCase): def setUp(self): User = get_user_model() self.user = User.objects.create(username='john', email='john1@doe.org') self.user.set_password('doe') self.user.save() self.email_address = EmailAddress.objects.create( user=self.user, email=self.user.email, verified=True, primary=True) self.email_address2 = EmailAddress.objects.create( user=self.user, email='john2@doe.org', verified=False, primary=False) self.client.login(username='john', password='doe') def test_add(self): resp = self.client.post( reverse('account_email'), {'action_add': '', 'email': 'john3@doe.org'}) EmailAddress.objects.get( email='john3@doe.org', user=self.user, verified=False, primary=False) self.assertTemplateUsed(resp, 'account/messages/email_confirmation_sent.txt') def test_ajax_add(self): resp = self.client.post( reverse('account_email'), {'action_add': '', 'email': 'john3@doe.org'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') data = json.loads(resp.content.decode('utf8')) self.assertEqual(data['location'], reverse('account_email')) def test_ajax_add_invalid(self): resp = self.client.post( reverse('account_email'), {'action_add': '', 'email': 'john3#doe.org'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') data = json.loads(resp.content.decode('utf8')) self.assertTrue('form_errors' in data) self.assertTrue('email' in data['form_errors']) def test_remove_primary(self): resp = self.client.post( reverse('account_email'), {'action_remove': '', 'email': self.email_address.email}) EmailAddress.objects.get(pk=self.email_address.pk) self.assertTemplateUsed( resp, 'account/messages/cannot_delete_primary_email.txt') def test_ajax_remove_primary(self): resp = self.client.post( reverse('account_email'), {'action_remove': '', 'email': self.email_address.email}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') self.assertTemplateUsed( resp, 'account/messages/cannot_delete_primary_email.txt') data = json.loads(resp.content.decode('utf8')) self.assertEqual(data['location'], reverse('account_email')) def 
test_remove_secondary(self): resp = self.client.post( reverse('account_email'), {'action_remove': '', 'email': self.email_address2.email}) self.assertRaises(EmailAddress.DoesNotExist, lambda: EmailAddress.objects.get( pk=self.email_address2.pk)) self.assertTemplateUsed( resp, 'account/messages/email_deleted.txt') def test_set_primary_unverified(self): resp = self.client.post( reverse('account_email'), {'action_primary': '', 'email': self.email_address2.email}) email_address = EmailAddress.objects.get(pk=self.email_address.pk) email_address2 = EmailAddress.objects.get(pk=self.email_address2.pk) self.assertFalse(email_address2.primary) self.assertTrue(email_address.primary) self.assertTemplateUsed( resp, 'account/messages/unverified_primary_email.txt') def test_set_primary(self): email_address2 = EmailAddress.objects.get(pk=self.email_address2.pk) email_address2.verified = True email_address2.save() resp = self.client.post( reverse('account_email'), {'action_primary': '', 'email': self.email_address2.email}) email_address = EmailAddress.objects.get(pk=self.email_address.pk) email_address2 = EmailAddress.objects.get(pk=self.email_address2.pk) self.assertFalse(email_address.primary) self.assertTrue(email_address2.primary) self.assertTemplateUsed( resp, 'account/messages/primary_email_set.txt') def test_verify(self): resp = self.client.post( reverse('account_email'), {'action_send': '', 'email': self.email_address2.email}) self.assertTemplateUsed( resp, 'account/messages/email_confirmation_sent.txt') class BaseSignupFormTests(TestCase): @override_settings( ACCOUNT_USERNAME_REQUIRED=True, ACCOUNT_USERNAME_BLACKLIST=['username']) def test_username_in_blacklist(self): data = { 'username': 'username', 'email': 'user@example.com', } form = BaseSignupForm(data, email_required=True) self.assertFalse(form.is_valid()) @override_settings( ACCOUNT_USERNAME_REQUIRED=True, ACCOUNT_USERNAME_BLACKLIST=['username']) def test_username_not_in_blacklist(self): data = { 'username': 'theusername', 'email': 'user@example.com', } form = BaseSignupForm(data, email_required=True) self.assertTrue(form.is_valid()) @override_settings(ACCOUNT_USERNAME_REQUIRED=True) def test_username_maxlength(self): data = { 'username': 'username', 'email': 'user@example.com', } form = BaseSignupForm(data, email_required=True) max_length = get_username_max_length() field = form.fields['username'] self.assertEqual(field.max_length, max_length) widget = field.widget self.assertEqual(widget.attrs.get('maxlength'), str(max_length)) @override_settings( ACCOUNT_USERNAME_REQUIRED=True, ACCOUNT_SIGNUP_EMAIL_ENTER_TWICE=True) def test_signup_email_verification(self): data = { 'username': 'username', 'email': 'user@example.com', } form = BaseSignupForm(data, email_required=True) self.assertFalse(form.is_valid()) data = { 'username': 'username', 'email1': 'user@example.com', 'email2': 'user@example.com', } form = BaseSignupForm(data, email_required=True) self.assertTrue(form.is_valid()) data['email2'] = 'anotheruser@example.com' form = BaseSignupForm(data, email_required=True) self.assertFalse(form.is_valid()) class AuthenticationBackendTests(TestCase): def setUp(self): user = get_user_model().objects.create( is_active=True, email='john@doe.com', username='john') user.set_password(user.username) user.save() self.user = user @override_settings( ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) # noqa def test_auth_by_username(self): user = self.user backend = AuthenticationBackend() self.assertEqual( backend.authenticate( 
username=user.username, password=user.username).pk, user.pk) self.assertEqual( backend.authenticate( username=user.email, password=user.username), None) @override_settings( ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) # noqa def test_auth_by_email(self): user = self.user backend = AuthenticationBackend() self.assertEqual( backend.authenticate( username=user.email, password=user.username).pk, user.pk) self.assertEqual( backend.authenticate( username=user.username, password=user.username), None) @override_settings( ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) # noqa def test_auth_by_username_or_email(self): user = self.user backend = AuthenticationBackend() self.assertEqual( backend.authenticate( username=user.email, password=user.username).pk, user.pk) self.assertEqual( backend.authenticate( username=user.username, password=user.username).pk, user.pk) class UtilsTests(TestCase): def setUp(self): if hasattr(models, 'UUIDField'): self.user_id = uuid.uuid4().hex class UUIDUser(AbstractUser): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) class Meta(AbstractUser.Meta): swappable = 'AUTH_USER_MODEL' else: UUIDUser = get_user_model() self.UUIDUser = UUIDUser @unittest.skipUnless(hasattr(models, 'UUIDField'), reason="No UUIDField in this django version") def test_url_str_to_pk_identifies_UUID_as_stringlike(self): with patch('allauth.account.utils.get_user_model') as mocked_gum: mocked_gum.return_value = self.UUIDUser self.assertEqual(url_str_to_user_pk(self.user_id), self.user_id) def test_pk_to_url_string_identifies_UUID_as_stringlike(self): user = self.UUIDUser( is_active=True, email='john@doe.com', username='john') self.assertEquals(user_pk_to_url_str(user), str(user.pk))
40.093074
95
0.582195
from __future__ import absolute_import import json from datetime import timedelta import django from django.utils.timezone import now from django.test.utils import override_settings from django.conf import settings from django.core.urlresolvers import reverse from django.test.client import Client from django.core import mail from django.test.client import RequestFactory from django.contrib.auth.models import AnonymousUser, AbstractUser from django.db import models import unittest from allauth.tests import TestCase, patch from allauth.account.forms import BaseSignupForm from allauth.account.models import ( EmailAddress, EmailConfirmation, EmailConfirmationHMAC) from allauth.utils import ( get_current_site, get_user_model, get_username_max_length) from . import app_settings from .auth_backends import AuthenticationBackend from .adapter import get_adapter from .utils import url_str_to_user_pk, user_pk_to_url_str import uuid @override_settings( ACCOUNT_DEFAULT_HTTP_PROTOCOL='https', ACCOUNT_EMAIL_VERIFICATION=app_settings.EmailVerificationMethod.MANDATORY, ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME, ACCOUNT_SIGNUP_FORM_CLASS=None, ACCOUNT_EMAIL_SUBJECT_PREFIX=None, LOGIN_REDIRECT_URL='/accounts/profile/', ACCOUNT_ADAPTER='allauth.account.adapter.DefaultAccountAdapter', ACCOUNT_USERNAME_REQUIRED=True) class AccountTests(TestCase): def setUp(self): if 'allauth.socialaccount' in settings.INSTALLED_APPS: from ..socialaccount.models import SocialApp sa = SocialApp.objects.create(name='testfb', provider='facebook') sa.sites.add(get_current_site()) @override_settings( ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod .USERNAME_EMAIL) def test_username_containing_at(self): user = get_user_model().objects.create(username='@raymond.penners') user.set_password('psst') user.save() EmailAddress.objects.create(user=user, email='raymond.penners@gmail.com', primary=True, verified=True) resp = self.client.post(reverse('account_login'), {'login': '@raymond.penners', 'password': 'psst'}) self.assertRedirects(resp, 'http://testserver'+settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False) def test_signup_same_email_verified_externally(self): user = self._test_signup_email_verified_externally('john@doe.com', 'john@doe.com') self.assertEqual(EmailAddress.objects.filter(user=user).count(), 1) EmailAddress.objects.get(verified=True, email='john@doe.com', user=user, primary=True) def test_signup_other_email_verified_externally(self): user = self._test_signup_email_verified_externally('john@home.com', 'john@work.com') self.assertEqual(EmailAddress.objects.filter(user=user).count(), 2) EmailAddress.objects.get(verified=False, email='john@home.com', user=user, primary=False) EmailAddress.objects.get(verified=True, email='john@work.com', user=user, primary=True) def _test_signup_email_verified_externally(self, signup_email, verified_email): username = 'johndoe' request = RequestFactory().post(reverse('account_signup'), {'username': username, 'email': signup_email, 'password1': 'johndoe', 'password2': 'johndoe'}) from django.contrib.messages.middleware import MessageMiddleware from django.contrib.sessions.middleware import SessionMiddleware SessionMiddleware().process_request(request) MessageMiddleware().process_request(request) request.user = AnonymousUser() request.session['account_verified_email'] = verified_email from .views import signup resp = signup(request) self.assertEqual(resp.status_code, 302) self.assertEqual(resp['location'], 
get_adapter().get_login_redirect_url(request)) self.assertEqual(len(mail.outbox), 0) return get_user_model().objects.get(username=username) @override_settings( ACCOUNT_USERNAME_REQUIRED=True, ACCOUNT_SIGNUP_EMAIL_ENTER_TWICE=True) def test_signup_email_twice(self): request = RequestFactory().post(reverse('account_signup'), {'username': 'johndoe', 'email1': 'john@work.com', 'email2': 'john@work.com', 'password1': 'johndoe', 'password2': 'johndoe'}) from django.contrib.messages.middleware import MessageMiddleware from django.contrib.sessions.middleware import SessionMiddleware SessionMiddleware().process_request(request) MessageMiddleware().process_request(request) request.user = AnonymousUser() from .views import signup signup(request) user = get_user_model().objects.get(username='johndoe') self.assertEqual(user.email, 'john@work.com') def _create_user(self): user = get_user_model().objects.create(username='john', is_active=True) user.set_password('doe') user.save() return user def _create_user_and_login(self): user = self._create_user() self.client.login(username='john', password='doe') return user def test_redirect_when_authenticated(self): self._create_user_and_login() c = self.client resp = c.get(reverse('account_login')) self.assertRedirects(resp, 'http://testserver/accounts/profile/', fetch_redirect_response=False) def test_password_reset_get(self): resp = self.client.get(reverse('account_reset_password')) self.assertTemplateUsed(resp, 'account/password_reset.html') def test_password_set_redirect(self): resp = self._password_set_or_reset_redirect('account_set_password', True) self.assertEqual(resp.status_code, 302) def test_password_reset_no_redirect(self): resp = self._password_set_or_reset_redirect('account_change_password', True) self.assertEqual(resp.status_code, 200) def test_password_set_no_redirect(self): resp = self._password_set_or_reset_redirect('account_set_password', False) self.assertEqual(resp.status_code, 200) def test_password_reset_redirect(self): resp = self._password_set_or_reset_redirect('account_change_password', False) self.assertEqual(resp.status_code, 302) def _password_set_or_reset_redirect(self, urlname, usable_password): user = self._create_user_and_login() c = self.client if not usable_password: user.set_unusable_password() user.save() resp = c.get(reverse(urlname)) return resp def test_password_forgotten_username_hint(self): user = self._request_new_password() body = mail.outbox[0].body assert user.username in body @override_settings( ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) def test_password_forgotten_no_username_hint(self): user = self._request_new_password() body = mail.outbox[0].body assert user.username not in body def _request_new_password(self): user = get_user_model().objects.create( username='john', email='john@doe.org', is_active=True) user.set_password('doe') user.save() self.client.post( reverse('account_reset_password'), data={'email': 'john@doe.org'}) self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].to, ['john@doe.org']) return user def test_password_reset_flow(self): user = self._request_new_password() body = mail.outbox[0].body self.assertGreater(body.find('https://'), 0) url = body[body.find('/password/reset/'):].split()[0] resp = self.client.get(url) self.assertTemplateUsed( resp, 'account/password_reset_from_key.%s' % app_settings.TEMPLATE_EXTENSION) self.assertFalse('token_fail' in resp.context_data) resp = self.client.post(url, {'password1': 'newpass123', 'password2': 'newpass123'}) 
self.assertRedirects(resp, reverse('account_reset_password_from_key_done')) user = get_user_model().objects.get(pk=user.pk) self.assertTrue(user.check_password('newpass123')) resp = self.client.post(url, {'password1': 'newpass123', 'password2': 'newpass123'}) self.assertTemplateUsed( resp, 'account/password_reset_from_key.%s' % app_settings.TEMPLATE_EXTENSION) self.assertTrue(resp.context_data['token_fail']) response = self.client.get(url) self.assertTemplateUsed( response, 'account/password_reset_from_key.%s' % app_settings.TEMPLATE_EXTENSION) self.assertTrue(response.context_data['token_fail']) response = self.client.post(url, {'password1': 'newpass123', 'password2': 'newpass123'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') self.assertEqual(response.status_code, 400) data = json.loads(response.content.decode('utf8')) self.assertTrue('form_errors' in data) self.assertTrue('__all__' in data['form_errors']) @override_settings(ACCOUNT_LOGIN_ON_PASSWORD_RESET=True) def test_password_reset_ACCOUNT_LOGIN_ON_PASSWORD_RESET(self): user = self._request_new_password() body = mail.outbox[0].body url = body[body.find('/password/reset/'):].split()[0] resp = self.client.post( url, {'password1': 'newpass123', 'password2': 'newpass123'}) self.assertTrue(user.is_authenticated()) self.assertRedirects(resp, '/confirm-email/') @override_settings(ACCOUNT_EMAIL_CONFIRMATION_HMAC=False) def test_email_verification_mandatory(self): c = Client() resp = c.post(reverse('account_signup'), {'username': 'johndoe', 'email': 'john@doe.com', 'password1': 'johndoe', 'password2': 'johndoe'}, follow=True) self.assertEqual(resp.status_code, 200) self.assertEqual(mail.outbox[0].to, ['john@doe.com']) self.assertGreater(mail.outbox[0].body.find('https://'), 0) self.assertEqual(len(mail.outbox), 1) self.assertTemplateUsed( resp, 'account/verification_sent.%s' % app_settings.TEMPLATE_EXTENSION) for attempt in [1, 2]: resp = c.post(reverse('account_login'), {'login': 'johndoe', 'password': 'johndoe'}, follow=True) # users verify their email adresses. self.assertTrue(get_user_model().objects.filter( username='johndoe', is_active=True).exists()) self.assertTemplateUsed( resp, 'account/verification_sent.' + app_settings.TEMPLATE_EXTENSION) # Attempt 1: no mail is sent due to cool-down , # but there was already a mail in the outbox. self.assertEqual(len(mail.outbox), attempt) self.assertEqual( EmailConfirmation.objects.filter( email_address__email='john@doe.com').count(), attempt) # Wait for cooldown EmailConfirmation.objects.update(sent=now() - timedelta(days=1)) # Verify, and re-attempt to login. 
confirmation = EmailConfirmation \ .objects \ .filter(email_address__user__username='johndoe')[:1] \ .get() resp = c.get(reverse('account_confirm_email', args=[confirmation.key])) self.assertTemplateUsed( resp, 'account/email_confirm.%s' % app_settings.TEMPLATE_EXTENSION) c.post(reverse('account_confirm_email', args=[confirmation.key])) resp = c.post(reverse('account_login'), {'login': 'johndoe', 'password': 'johndoe'}) self.assertRedirects(resp, 'http://testserver'+settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False) def test_email_escaping(self): site = get_current_site() site.name = '<enc&"test>' site.save() u = get_user_model().objects.create( username='test', email='foo@bar.com') request = RequestFactory().get('/') EmailAddress.objects.add_email(request, u, u.email, confirm=True) self.assertTrue(mail.outbox[0].subject[1:].startswith(site.name)) @override_settings( ACCOUNT_EMAIL_VERIFICATION=app_settings.EmailVerificationMethod .OPTIONAL) def test_login_unverified_account_optional(self): user = get_user_model().objects.create(username='john') user.set_password('doe') user.save() EmailAddress.objects.create(user=user, email='john@example.com', primary=True, verified=False) resp = self.client.post(reverse('account_login'), {'login': 'john', 'password': 'doe'}) self.assertRedirects(resp, 'http://testserver'+settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False) @override_settings( ACCOUNT_EMAIL_VERIFICATION=app_settings.EmailVerificationMethod .OPTIONAL, ACCOUNT_LOGIN_ATTEMPTS_LIMIT=3) def test_login_failed_attempts_exceeded(self): user = get_user_model().objects.create(username='john') user.set_password('doe') user.save() EmailAddress.objects.create(user=user, email='john@example.com', primary=True, verified=False) for i in range(5): is_valid_attempt = (i == 4) is_locked = (i >= 3) resp = self.client.post( reverse('account_login'), {'login': 'john', 'password': ( 'doe' if is_valid_attempt else 'wrong')}) self.assertFormError( resp, 'form', None, 'Too many failed login attempts. Try again later.' 
if is_locked else 'The username and/or password you specified are not correct.') def test_login_unverified_account_mandatory(self): user = get_user_model().objects.create(username='john') user.set_password('doe') user.save() EmailAddress.objects.create(user=user, email='john@example.com', primary=True, verified=False) resp = self.client.post(reverse('account_login'), {'login': 'john', 'password': 'doe'}) self.assertRedirects(resp, reverse('account_email_verification_sent')) def test_login_inactive_account(self): # Inactive and verified user account user = get_user_model().objects.create(username='john', is_active=False) user.set_password('doe') user.save() EmailAddress.objects.create(user=user, email='john@example.com', primary=True, verified=True) resp = self.client.post(reverse('account_login'), {'login': 'john', 'password': 'doe'}) self.assertRedirects(resp, reverse('account_inactive')) # Inactive and unverified user account user = get_user_model().objects.create(username='doe', is_active=False) user.set_password('john') user.save() EmailAddress.objects.create(user=user, email='doe@example.com', primary=True, verified=False) resp = self.client.post(reverse('account_login'), {'login': 'doe', 'password': 'john'}) self.assertRedirects(resp, reverse('account_inactive')) def test_ajax_password_reset(self): get_user_model().objects.create( username='john', email='john@doe.org', is_active=True) resp = self.client.post( reverse('account_reset_password'), data={'email': 'john@doe.org'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].to, ['john@doe.org']) self.assertEqual(resp['content-type'], 'application/json') def test_ajax_login_fail(self): resp = self.client.post(reverse('account_login'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') self.assertEqual(resp.status_code, 400) json.loads(resp.content.decode('utf8')) # TODO: Actually test something @override_settings( ACCOUNT_EMAIL_VERIFICATION=app_settings.EmailVerificationMethod .OPTIONAL) def test_ajax_login_success(self): user = get_user_model().objects.create(username='john', is_active=True) user.set_password('doe') user.save() resp = self.client.post(reverse('account_login'), {'login': 'john', 'password': 'doe'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') self.assertEqual(resp.status_code, 200) data = json.loads(resp.content.decode('utf8')) self.assertEqual(data['location'], '/accounts/profile/') def test_email_view(self): self._create_user_and_login() self.client.get(reverse('account_email')) # TODO: Actually test something @override_settings(ACCOUNT_LOGOUT_ON_GET=True) def test_logout_view_on_get(self): c, resp = self._logout_view('get') self.assertTemplateUsed(resp, 'account/messages/logged_out.txt') @override_settings(ACCOUNT_LOGOUT_ON_GET=False) def test_logout_view_on_post(self): c, resp = self._logout_view('get') self.assertTemplateUsed( resp, 'account/logout.%s' % app_settings.TEMPLATE_EXTENSION) resp = c.post(reverse('account_logout')) self.assertTemplateUsed(resp, 'account/messages/logged_out.txt') def _logout_view(self, method): c = Client() user = get_user_model().objects.create(username='john', is_active=True) user.set_password('doe') user.save() c = Client() c.login(username='john', password='doe') return c, getattr(c, method)(reverse('account_logout')) @override_settings(ACCOUNT_EMAIL_VERIFICATION=app_settings .EmailVerificationMethod.OPTIONAL) def test_optional_email_verification(self): c = Client() # Signup c.get(reverse('account_signup')) resp = 
c.post(reverse('account_signup'), {'username': 'johndoe', 'email': 'john@doe.com', 'password1': 'johndoe', 'password2': 'johndoe'}) # Logged in self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False) self.assertEqual(mail.outbox[0].to, ['john@doe.com']) self.assertEqual(len(mail.outbox), 1) # Logout & login again c.logout() # Wait for cooldown EmailConfirmation.objects.update(sent=now() - timedelta(days=1)) # Signup resp = c.post(reverse('account_login'), {'login': 'johndoe', 'password': 'johndoe'}) self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False) self.assertEqual(mail.outbox[0].to, ['john@doe.com']) # There was an issue that we sent out email confirmation mails # on each login in case of optional verification. Make sure # this is not the case: self.assertEqual(len(mail.outbox), 1) @override_settings(ACCOUNT_AUTHENTICATED_LOGIN_REDIRECTS=False) def test_account_authenticated_login_redirects_is_false(self): self._create_user_and_login() resp = self.client.get(reverse('account_login')) self.assertEqual(resp.status_code, 200) @override_settings(AUTH_PASSWORD_VALIDATORS=[{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': { 'min_length': 9, } }]) def test_django_password_validation(self): if django.VERSION < (1, 9, ): return resp = self.client.post( reverse('account_signup'), {'username': 'johndoe', 'email': 'john@doe.com', 'password1': 'johndoe', 'password2': 'johndoe'}) self.assertFormError(resp, 'form', None, []) self.assertFormError( resp, 'form', 'password1', ['This password is too short.' ' It must contain at least 9 characters.']) @override_settings(ACCOUNT_EMAIL_CONFIRMATION_HMAC=True) def test_email_confirmation_hmac_falls_back(self): user = self._create_user() email = EmailAddress.objects.create( user=user, email='a@b.com', verified=False, primary=True) confirmation = EmailConfirmation.create(email) confirmation.sent = now() confirmation.save() self.client.post( reverse('account_confirm_email', args=[confirmation.key])) email = EmailAddress.objects.get(pk=email.pk) self.assertTrue(email.verified) @override_settings(ACCOUNT_EMAIL_CONFIRMATION_HMAC=True) def test_email_confirmation_hmac(self): user = self._create_user() email = EmailAddress.objects.create( user=user, email='a@b.com', verified=False, primary=True) confirmation = EmailConfirmationHMAC(email) confirmation.send() self.assertEqual(len(mail.outbox), 1) self.client.post( reverse('account_confirm_email', args=[confirmation.key])) email = EmailAddress.objects.get(pk=email.pk) self.assertTrue(email.verified) @override_settings( ACCOUNT_EMAIL_CONFIRMATION_HMAC=True, ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS=0) def test_email_confirmation_hmac_timeout(self): user = self._create_user() email = EmailAddress.objects.create( user=user, email='a@b.com', verified=False, primary=True) confirmation = EmailConfirmationHMAC(email) confirmation.send() self.assertEqual(len(mail.outbox), 1) self.client.post( reverse('account_confirm_email', args=[confirmation.key])) email = EmailAddress.objects.get(pk=email.pk) self.assertFalse(email.verified) class EmailFormTests(TestCase): def setUp(self): User = get_user_model() self.user = User.objects.create(username='john', email='john1@doe.org') self.user.set_password('doe') self.user.save() self.email_address = EmailAddress.objects.create( user=self.user, email=self.user.email, verified=True, primary=True) self.email_address2 = EmailAddress.objects.create( user=self.user, email='john2@doe.org', 
verified=False, primary=False) self.client.login(username='john', password='doe') def test_add(self): resp = self.client.post( reverse('account_email'), {'action_add': '', 'email': 'john3@doe.org'}) EmailAddress.objects.get( email='john3@doe.org', user=self.user, verified=False, primary=False) self.assertTemplateUsed(resp, 'account/messages/email_confirmation_sent.txt') def test_ajax_add(self): resp = self.client.post( reverse('account_email'), {'action_add': '', 'email': 'john3@doe.org'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') data = json.loads(resp.content.decode('utf8')) self.assertEqual(data['location'], reverse('account_email')) def test_ajax_add_invalid(self): resp = self.client.post( reverse('account_email'), {'action_add': '', 'email': 'john3#doe.org'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') data = json.loads(resp.content.decode('utf8')) self.assertTrue('form_errors' in data) self.assertTrue('email' in data['form_errors']) def test_remove_primary(self): resp = self.client.post( reverse('account_email'), {'action_remove': '', 'email': self.email_address.email}) EmailAddress.objects.get(pk=self.email_address.pk) self.assertTemplateUsed( resp, 'account/messages/cannot_delete_primary_email.txt') def test_ajax_remove_primary(self): resp = self.client.post( reverse('account_email'), {'action_remove': '', 'email': self.email_address.email}, HTTP_X_REQUESTED_WITH='XMLHttpRequest') self.assertTemplateUsed( resp, 'account/messages/cannot_delete_primary_email.txt') data = json.loads(resp.content.decode('utf8')) self.assertEqual(data['location'], reverse('account_email')) def test_remove_secondary(self): resp = self.client.post( reverse('account_email'), {'action_remove': '', 'email': self.email_address2.email}) self.assertRaises(EmailAddress.DoesNotExist, lambda: EmailAddress.objects.get( pk=self.email_address2.pk)) self.assertTemplateUsed( resp, 'account/messages/email_deleted.txt') def test_set_primary_unverified(self): resp = self.client.post( reverse('account_email'), {'action_primary': '', 'email': self.email_address2.email}) email_address = EmailAddress.objects.get(pk=self.email_address.pk) email_address2 = EmailAddress.objects.get(pk=self.email_address2.pk) self.assertFalse(email_address2.primary) self.assertTrue(email_address.primary) self.assertTemplateUsed( resp, 'account/messages/unverified_primary_email.txt') def test_set_primary(self): email_address2 = EmailAddress.objects.get(pk=self.email_address2.pk) email_address2.verified = True email_address2.save() resp = self.client.post( reverse('account_email'), {'action_primary': '', 'email': self.email_address2.email}) email_address = EmailAddress.objects.get(pk=self.email_address.pk) email_address2 = EmailAddress.objects.get(pk=self.email_address2.pk) self.assertFalse(email_address.primary) self.assertTrue(email_address2.primary) self.assertTemplateUsed( resp, 'account/messages/primary_email_set.txt') def test_verify(self): resp = self.client.post( reverse('account_email'), {'action_send': '', 'email': self.email_address2.email}) self.assertTemplateUsed( resp, 'account/messages/email_confirmation_sent.txt') class BaseSignupFormTests(TestCase): @override_settings( ACCOUNT_USERNAME_REQUIRED=True, ACCOUNT_USERNAME_BLACKLIST=['username']) def test_username_in_blacklist(self): data = { 'username': 'username', 'email': 'user@example.com', } form = BaseSignupForm(data, email_required=True) self.assertFalse(form.is_valid()) @override_settings( ACCOUNT_USERNAME_REQUIRED=True, ACCOUNT_USERNAME_BLACKLIST=['username']) def 
test_username_not_in_blacklist(self): data = { 'username': 'theusername', 'email': 'user@example.com', } form = BaseSignupForm(data, email_required=True) self.assertTrue(form.is_valid()) @override_settings(ACCOUNT_USERNAME_REQUIRED=True) def test_username_maxlength(self): data = { 'username': 'username', 'email': 'user@example.com', } form = BaseSignupForm(data, email_required=True) max_length = get_username_max_length() field = form.fields['username'] self.assertEqual(field.max_length, max_length) widget = field.widget self.assertEqual(widget.attrs.get('maxlength'), str(max_length)) @override_settings( ACCOUNT_USERNAME_REQUIRED=True, ACCOUNT_SIGNUP_EMAIL_ENTER_TWICE=True) def test_signup_email_verification(self): data = { 'username': 'username', 'email': 'user@example.com', } form = BaseSignupForm(data, email_required=True) self.assertFalse(form.is_valid()) data = { 'username': 'username', 'email1': 'user@example.com', 'email2': 'user@example.com', } form = BaseSignupForm(data, email_required=True) self.assertTrue(form.is_valid()) data['email2'] = 'anotheruser@example.com' form = BaseSignupForm(data, email_required=True) self.assertFalse(form.is_valid()) class AuthenticationBackendTests(TestCase): def setUp(self): user = get_user_model().objects.create( is_active=True, email='john@doe.com', username='john') user.set_password(user.username) user.save() self.user = user @override_settings( ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME) # noqa def test_auth_by_username(self): user = self.user backend = AuthenticationBackend() self.assertEqual( backend.authenticate( username=user.username, password=user.username).pk, user.pk) self.assertEqual( backend.authenticate( username=user.email, password=user.username), None) @override_settings( ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.EMAIL) # noqa def test_auth_by_email(self): user = self.user backend = AuthenticationBackend() self.assertEqual( backend.authenticate( username=user.email, password=user.username).pk, user.pk) self.assertEqual( backend.authenticate( username=user.username, password=user.username), None) @override_settings( ACCOUNT_AUTHENTICATION_METHOD=app_settings.AuthenticationMethod.USERNAME_EMAIL) # noqa def test_auth_by_username_or_email(self): user = self.user backend = AuthenticationBackend() self.assertEqual( backend.authenticate( username=user.email, password=user.username).pk, user.pk) self.assertEqual( backend.authenticate( username=user.username, password=user.username).pk, user.pk) class UtilsTests(TestCase): def setUp(self): if hasattr(models, 'UUIDField'): self.user_id = uuid.uuid4().hex class UUIDUser(AbstractUser): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) class Meta(AbstractUser.Meta): swappable = 'AUTH_USER_MODEL' else: UUIDUser = get_user_model() self.UUIDUser = UUIDUser @unittest.skipUnless(hasattr(models, 'UUIDField'), reason="No UUIDField in this django version") def test_url_str_to_pk_identifies_UUID_as_stringlike(self): with patch('allauth.account.utils.get_user_model') as mocked_gum: mocked_gum.return_value = self.UUIDUser self.assertEqual(url_str_to_user_pk(self.user_id), self.user_id) def test_pk_to_url_string_identifies_UUID_as_stringlike(self): user = self.UUIDUser( is_active=True, email='john@doe.com', username='john') self.assertEquals(user_pk_to_url_str(user), str(user.pk))
true
true
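For readers following the record above: the snippet below condenses the password-reset round trip that test_password_reset_flow walks through (request the reset mail, pull the tokenised link out of the message body, post the new password against it). It is a hedged sketch, assuming a Django project with django-allauth installed and the account URLs mounted at the site root (as in allauth's own test URLconf); the test class name and credentials are invented for illustration.

from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase

from allauth.utils import get_user_model


class PasswordResetSmokeTest(TestCase):
    # Hypothetical test case; mirrors the flow exercised by AccountTests above.

    def test_reset_round_trip(self):
        user = get_user_model().objects.create(
            username='jane', email='jane@doe.org', is_active=True)
        user.set_password('old-pass')
        user.save()

        # Request the reset mail and extract the tokenised link from its body.
        self.client.post(reverse('account_reset_password'),
                         data={'email': 'jane@doe.org'})
        body = mail.outbox[0].body
        url = body[body.find('/password/reset/'):].split()[0]

        # Post the new password against the extracted link and check it took.
        self.client.post(url, {'password1': 'fresh-pass-123',
                               'password2': 'fresh-pass-123'})
        user = get_user_model().objects.get(pk=user.pk)
        self.assertTrue(user.check_password('fresh-pass-123'))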
f72476a51168fd61ef40256da67527b38bad600a
5,368
py
Python
core/cache/decorator.py
prorevizor/noc
37e44b8afc64318b10699c06a1138eee9e7d6a4e
[ "BSD-3-Clause" ]
84
2017-10-22T11:01:39.000Z
2022-02-27T03:43:48.000Z
core/cache/decorator.py
prorevizor/noc
37e44b8afc64318b10699c06a1138eee9e7d6a4e
[ "BSD-3-Clause" ]
22
2017-12-11T07:21:56.000Z
2021-09-23T02:53:50.000Z
core/cache/decorator.py
prorevizor/noc
37e44b8afc64318b10699c06a1138eee9e7d6a4e
[ "BSD-3-Clause" ]
23
2017-12-06T06:59:52.000Z
2022-02-24T00:02:25.000Z
# ---------------------------------------------------------------------- # Decorators # ---------------------------------------------------------------------- # Copyright (C) 2007-2020 The NOC Project # See LICENSE for details # ---------------------------------------------------------------------- # NOC modules from noc.core.perf import metrics from .base import cache as x_cache def cachedmethod(cache=None, key="cache-%s", lock=None, ttl=None, version=0): """ Decorator to wrap class instance or method with memoizing callable :param cache: In-memory function which follows dict protocol. None, when no in-memory caching required :param key: Key mask to convert args to string :param lock: Callable to get threading lock :param ttl: Record time-to-live :param version: External cache version :return: """ def decorator(method): if lock: def wrapper(self, *args, **kwargs): perf_key = key.replace("-%s", "").replace("-", "_") perf_key_requests = metrics["cache_requests", ("cache_key", perf_key)] perf_key_l1_hits = metrics[ "cache_hits", ("cache_key", perf_key), ("cache_level", "internal") ] perf_key_l2_hits = metrics[ "cache_hits", ("cache_key", perf_key), ("cache_level", "external") ] perf_key_misses = metrics["cache_misses", ("cache_key", perf_key)] perf_key_lock_acquires = metrics["cache_locks_acquires", ("cache_key", perf_key)] perf_key_requests += 1 k = key % args with lock(self): perf_key_lock_acquires += 1 if cache: # Try in-memory cache c = cache(self) if c is not None: # In-memory cache provided try: v = c[k] perf_key_l1_hits += 1 return v except KeyError: pass # Try external cache v = x_cache.get(k, version=version) if v: perf_key_l2_hits += 1 if cache: with lock(self): perf_key_lock_acquires += 1 # Backfill in-memory cache try: c[k] = v except ValueError: pass # Value too large return v # Fallback to function perf_key_misses += 1 v = method(self, *args, **kwargs) with lock(self): perf_key_lock_acquires += 1 if cache: # Backfill in-memory cache try: c[k] = v except ValueError: pass # Backfill external cache x_cache.set(k, v, ttl=ttl, version=version) # Done return v else: def wrapper(self, *args, **kwargs): perf_key = key.replace("-%s", "").replace("-", "_") perf_key_requests = metrics["cache_requests", ("cache_key", perf_key)] perf_key_l1_hits = metrics[ "cache_hits", ("cache_key", perf_key), ("cache_level", "internal") ] perf_key_l2_hits = metrics[ "cache_hits", ("cache_key", perf_key), ("cache_level", "external") ] perf_key_misses = metrics["cache_misses", ("cache_key", perf_key)] perf_key_requests += 1 k = key % args if cache: # Try in-memory cache c = cache(self) if c is not None: # In-memory cache provided try: v = c[k] perf_key_l1_hits += 1 return v except KeyError: pass # Try external cache v = x_cache.get(k, version=version) if v: perf_key_l2_hits += 1 if cache: # Backfill in-memory cache try: c[k] = v except ValueError: pass # Value too large return v # Fallback to function perf_key_misses += 1 v = method(self, *args, **kwargs) if cache: # Backfill in-memory cache try: c[k] = v except ValueError: pass # Backfill external cache x_cache.set(k, v, ttl=ttl, version=version) # Done return v return wrapper return decorator
39.182482
97
0.404434
from noc.core.perf import metrics from .base import cache as x_cache def cachedmethod(cache=None, key="cache-%s", lock=None, ttl=None, version=0): def decorator(method): if lock: def wrapper(self, *args, **kwargs): perf_key = key.replace("-%s", "").replace("-", "_") perf_key_requests = metrics["cache_requests", ("cache_key", perf_key)] perf_key_l1_hits = metrics[ "cache_hits", ("cache_key", perf_key), ("cache_level", "internal") ] perf_key_l2_hits = metrics[ "cache_hits", ("cache_key", perf_key), ("cache_level", "external") ] perf_key_misses = metrics["cache_misses", ("cache_key", perf_key)] perf_key_lock_acquires = metrics["cache_locks_acquires", ("cache_key", perf_key)] perf_key_requests += 1 k = key % args with lock(self): perf_key_lock_acquires += 1 if cache: c = cache(self) if c is not None: try: v = c[k] perf_key_l1_hits += 1 return v except KeyError: pass v = x_cache.get(k, version=version) if v: perf_key_l2_hits += 1 if cache: with lock(self): perf_key_lock_acquires += 1 try: c[k] = v except ValueError: pass return v perf_key_misses += 1 v = method(self, *args, **kwargs) with lock(self): perf_key_lock_acquires += 1 if cache: try: c[k] = v except ValueError: pass x_cache.set(k, v, ttl=ttl, version=version) return v else: def wrapper(self, *args, **kwargs): perf_key = key.replace("-%s", "").replace("-", "_") perf_key_requests = metrics["cache_requests", ("cache_key", perf_key)] perf_key_l1_hits = metrics[ "cache_hits", ("cache_key", perf_key), ("cache_level", "internal") ] perf_key_l2_hits = metrics[ "cache_hits", ("cache_key", perf_key), ("cache_level", "external") ] perf_key_misses = metrics["cache_misses", ("cache_key", perf_key)] perf_key_requests += 1 k = key % args if cache: c = cache(self) if c is not None: try: v = c[k] perf_key_l1_hits += 1 return v except KeyError: pass v = x_cache.get(k, version=version) if v: perf_key_l2_hits += 1 if cache: try: c[k] = v except ValueError: pass return v perf_key_misses += 1 v = method(self, *args, **kwargs) if cache: try: c[k] = v except ValueError: pass x_cache.set(k, v, ttl=ttl, version=version) return v return wrapper return decorator
true
true
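As context for the core/cache/decorator.py record above, here is a minimal usage sketch for cachedmethod. The ConfigStore class, its key mask and TTL are hypothetical, and the sketch assumes the noc package is importable with its external cache backend configured.

import threading

from noc.core.cache.decorator import cachedmethod


class ConfigStore(object):
    # Hypothetical consumer of cachedmethod (not part of the NOC code base).

    def __init__(self):
        self._cache = {}               # in-memory (L1) cache following the dict protocol
        self._lock = threading.Lock()  # guards L1 access inside the wrapper

    @cachedmethod(
        cache=lambda self: self._cache,  # callable returning the in-memory cache (or None)
        key="configstore-%s",            # key mask formatted with the positional call args
        lock=lambda self: self._lock,    # callable returning a context-manager lock
        ttl=300,                         # external (L2) cache time-to-live, in seconds
        version=1,                       # bump to invalidate existing L2 entries
    )
    def get_config(self, name):
        # Only runs on a miss in both the in-memory and the external cache.
        return {"name": name, "value": name.upper()}

The first call per name executes the method; repeated calls are answered from the in-memory dict and, across processes, from the external cache under the key "configstore-<name>" until the TTL expires.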
f72476bf2e961b26c53e96e9358bb4c0a54239b7
8,355
py
Python
tron/Vocab/hubCommands.py
sdss/tron
886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322
[ "BSD-3-Clause" ]
null
null
null
tron/Vocab/hubCommands.py
sdss/tron
886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322
[ "BSD-3-Clause" ]
null
null
null
tron/Vocab/hubCommands.py
sdss/tron
886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322
[ "BSD-3-Clause" ]
null
null
null
__all__ = ['hubCommands'] import sys import Vocab.InternalCmd as InternalCmd from tron import Misc, g, hub from tron.Hub.KV.KVDict import kvAsASCII class hubCommands(InternalCmd.InternalCmd): """ All the commands that the "hub" package provides. The user executes these from the command window: hub startNubs tspec hub status etc. """ def __init__(self, **argv): argv['safeCmds'] = r'^\s*(actors|commanders|actorInfo|version|status|ping)\s*$' argv['needsAuth'] = True InternalCmd.InternalCmd.__init__(self, 'hub', **argv) self.commands = { 'actors': self.actors, 'commanders': self.commanders, 'restart!': self.reallyReallyRestart, 'startNubs': self.startNubs, 'stopNubs': self.stopNubs, 'actorInfo': self.actorInfo, 'commands': self.commandInfo, 'setUsername': self.setUsername, 'status': self.status, 'loadWords': self.loadWords, 'getKeys': self.getKeys, 'listen': self.doListen, 'version': self.version, 'ping': self.status, 'relog': self.relog, } def version(self, cmd, finish=True): """ Return the hub's version number. """ hub.getSetHubVersion() vString = 'version=%s' % (g.KVs.getKV('hub', 'version', default='Unknown')) if finish: cmd.finish(vString) else: cmd.inform(vString) def doListen(self, cmd): """ Change what replies get sent to us. """ matched, unmatched, leftovers = cmd.match([('listen', None), ('addActors', None), ('delActors', None)]) cmdr = cmd.cmdr() if not cmdr: cmd.fail('debug=%s' % (Misc.qstr('cmdr=%s; cmd=%s' % (cmdr, cmd)))) return Misc.log('doListen', 'start: %s' % (cmdr.taster)) Misc.log('doListen', 'leftovers: %s' % (leftovers)) if 'addActors' in matched: actors = list(leftovers.keys()) Misc.log('doListen', 'addActors: %s' % (actors)) # cmd.inform('text="%s"' % (Misc.qstr("adding actors: %s" % (actors)))) cmdr.taster.addToFilter(actors, [], actors) cmd.finish() elif 'delActors' in matched: actors = list(leftovers.keys()) Misc.log('doListen', 'delActors: %s' % (actors)) # cmd.inform('text="%s"' % (Misc.qstr("removing actors: %s" % (actors)))) cmdr.taster.removeFromFilter(actors, [], actors) cmd.finish() else: cmd.fail('text="unknown listen command"') Misc.log('doListen', 'finish: %s' % (cmdr.taster)) def actors(self, cmd, finish=True): """ Return a list of the currently connected actors. """ g.actors.listSelf(cmd=cmd) if finish: cmd.finish('') def commanders(self, cmd, finish=True): """ Return a list of the currently connected commanders. """ g.commanders.listSelf(cmd=cmd) if finish: cmd.finish('') def status(self, cmd, finish=True): Misc.cfg.flush() self.version(cmd, finish=False) self.actors(cmd, finish=False) self.commanders(cmd, finish=False) if finish: cmd.finish('') def setUsername(self, cmd): """ Change the username for the cmd's commander. """ args = cmd.cmd.split() args = args[1:] if len(args) != 1: cmd.fail('cmdError="usage: setUsername newname"') return username = args[0] cmdr = cmd.cmdr() cmdr.setName(username) cmd.finish('') def stopNubs(self, cmd): """ stop a list of nubs. """ nubs = list(cmd.argDict.keys())[1:] if len(nubs) == 0: cmd.fail('text="must specify one or more nubs to stop..."') return for nub in nubs: try: cmd.inform('text=%s' % (Misc.qstr('stopping nub %s' % (nub)))) hub.stopNub(nub) except Exception as e: cmd.warn('text=%s' % (Misc.qstr('failed to stop nub %s: %s' % (nub, e)))) cmd.finish('') def startNubs(self, cmd): """ (re-)start a list of nubs. """ # Flush the configuration to force a reload later. This allows to change the # configuration or nubs during runtime without restarting tron. 
Misc.cfg.flush() nubs = list(cmd.argDict.keys())[1:] if len(nubs) == 0: cmd.fail('text="must specify one or more nubs to start..."') return for nub in nubs: try: cmd.inform('text=%s' % (Misc.qstr('(re-)starting nub %s' % (nub)))) hub.startNub(nub) except Exception as e: cmd.warn('text=%s' % (Misc.qstr('failed to start nub %s: %s' % (nub, e)))) cmd.finish('') def actorInfo(self, cmd): """ Get gory status about a list of actor nubs. """ # Query all actors if none are specified. names = list(cmd.argDict.keys())[1:] if len(names) == 0: names = list(g.actors.keys()) for n in names: try: nub = g.actors[n] nub.statusCmd(cmd, doFinish=False) except Exception as e: cmd.warn('text=%s' % (Misc.qstr('failed to query actor %s: %s' % (n, e)))) cmd.finish('') def commandInfo(self, cmd): """ Get gory status about a list of actor nubs. """ # Query all actors if none are specified. names = list(cmd.argDict.keys())[1:] if len(names) == 0: names = list(g.actors.keys()) for n in names: try: nub = g.actors[n] nub.listCommandsCmd(cmd, doFinish=False) except Exception as e: cmd.warn('text=%s' % (Misc.qstr('failed to query actor %s: %s' % (n, e)))) cmd.finish('') def loadWords(self, cmd, finish=True): """ (re-)load an internal vocabulary word. """ words = list(cmd.argDict.keys())[1:] if len(words) == 0: words = None Misc.log('hubCmd', 'loadWords loading %s' % (words)) try: hub.loadWords(words) except Exception as e: Misc.tback('hub.loadWords', e) cmd.fail('text=%s' % (Misc.qstr(e))) return if finish: cmd.finish() def getKeys(self, cmd): """ Return a bunch of keys for a given source. Cmd args: src - a key source name. keys - 1 or more key names. """ words = cmd.cmd.split() if len(words) < 3: cmd.fail('text="usage: getKeys srcName key1 [key2 ... keyN]"') return src = words[1] keys = words[2:] matched, unmatched = g.KVs.getValues(src, keys) Misc.log('hub.getKeys', 'matched=%s unmatched=%s' % (matched, unmatched)) for k, v in matched.items(): kvString = kvAsASCII(k, v) cmd.inform(kvString, src='hub.%s' % (src)) if unmatched: cmd.warn('text=%s' % (Misc.qstr('unmatched %s keys: %s' % (src, ', '.join(unmatched))))) cmd.finish('') def reallyReallyRestart(self, cmd): """ Restart the entire MC. Which among other things kills us now. """ cmd.warn('text=%s' % (Misc.qstr('Restarting the hub now... bye, bye, and please call back soon!'))) # Give the poller a chance to flush out the warning. g.poller.callMeIn(hub.restart, 1.0) def relog(self, cmd): """ Change where stderr goes to. """ args = cmd.cmd.split() args = args[1:] if len(args) != 1: cmd.fail('cmdError="usage: relog filename"') return filename = args[0] import os f = open(filename, 'a', 1) os.dup2(f.fileno(), 1) os.dup2(f.fileno(), 2) sys.stdout = os.fdopen(1, 'w', 1) sys.stderr = os.fdopen(2, 'w', 1) f.close() cmd.finish('text="Jeebus, you done it now, whatever it was"')
30.830258
95
0.518971
__all__ = ['hubCommands'] import sys import Vocab.InternalCmd as InternalCmd from tron import Misc, g, hub from tron.Hub.KV.KVDict import kvAsASCII class hubCommands(InternalCmd.InternalCmd): def __init__(self, **argv): argv['safeCmds'] = r'^\s*(actors|commanders|actorInfo|version|status|ping)\s*$' argv['needsAuth'] = True InternalCmd.InternalCmd.__init__(self, 'hub', **argv) self.commands = { 'actors': self.actors, 'commanders': self.commanders, 'restart!': self.reallyReallyRestart, 'startNubs': self.startNubs, 'stopNubs': self.stopNubs, 'actorInfo': self.actorInfo, 'commands': self.commandInfo, 'setUsername': self.setUsername, 'status': self.status, 'loadWords': self.loadWords, 'getKeys': self.getKeys, 'listen': self.doListen, 'version': self.version, 'ping': self.status, 'relog': self.relog, } def version(self, cmd, finish=True): hub.getSetHubVersion() vString = 'version=%s' % (g.KVs.getKV('hub', 'version', default='Unknown')) if finish: cmd.finish(vString) else: cmd.inform(vString) def doListen(self, cmd): matched, unmatched, leftovers = cmd.match([('listen', None), ('addActors', None), ('delActors', None)]) cmdr = cmd.cmdr() if not cmdr: cmd.fail('debug=%s' % (Misc.qstr('cmdr=%s; cmd=%s' % (cmdr, cmd)))) return Misc.log('doListen', 'start: %s' % (cmdr.taster)) Misc.log('doListen', 'leftovers: %s' % (leftovers)) if 'addActors' in matched: actors = list(leftovers.keys()) Misc.log('doListen', 'addActors: %s' % (actors)) cmdr.taster.addToFilter(actors, [], actors) cmd.finish() elif 'delActors' in matched: actors = list(leftovers.keys()) Misc.log('doListen', 'delActors: %s' % (actors)) cmdr.taster.removeFromFilter(actors, [], actors) cmd.finish() else: cmd.fail('text="unknown listen command"') Misc.log('doListen', 'finish: %s' % (cmdr.taster)) def actors(self, cmd, finish=True): g.actors.listSelf(cmd=cmd) if finish: cmd.finish('') def commanders(self, cmd, finish=True): g.commanders.listSelf(cmd=cmd) if finish: cmd.finish('') def status(self, cmd, finish=True): Misc.cfg.flush() self.version(cmd, finish=False) self.actors(cmd, finish=False) self.commanders(cmd, finish=False) if finish: cmd.finish('') def setUsername(self, cmd): args = cmd.cmd.split() args = args[1:] if len(args) != 1: cmd.fail('cmdError="usage: setUsername newname"') return username = args[0] cmdr = cmd.cmdr() cmdr.setName(username) cmd.finish('') def stopNubs(self, cmd): nubs = list(cmd.argDict.keys())[1:] if len(nubs) == 0: cmd.fail('text="must specify one or more nubs to stop..."') return for nub in nubs: try: cmd.inform('text=%s' % (Misc.qstr('stopping nub %s' % (nub)))) hub.stopNub(nub) except Exception as e: cmd.warn('text=%s' % (Misc.qstr('failed to stop nub %s: %s' % (nub, e)))) cmd.finish('') def startNubs(self, cmd): Misc.cfg.flush() nubs = list(cmd.argDict.keys())[1:] if len(nubs) == 0: cmd.fail('text="must specify one or more nubs to start..."') return for nub in nubs: try: cmd.inform('text=%s' % (Misc.qstr('(re-)starting nub %s' % (nub)))) hub.startNub(nub) except Exception as e: cmd.warn('text=%s' % (Misc.qstr('failed to start nub %s: %s' % (nub, e)))) cmd.finish('') def actorInfo(self, cmd): names = list(cmd.argDict.keys())[1:] if len(names) == 0: names = list(g.actors.keys()) for n in names: try: nub = g.actors[n] nub.statusCmd(cmd, doFinish=False) except Exception as e: cmd.warn('text=%s' % (Misc.qstr('failed to query actor %s: %s' % (n, e)))) cmd.finish('') def commandInfo(self, cmd): names = list(cmd.argDict.keys())[1:] if len(names) == 0: names = list(g.actors.keys()) for n in names: try: nub = 
g.actors[n] nub.listCommandsCmd(cmd, doFinish=False) except Exception as e: cmd.warn('text=%s' % (Misc.qstr('failed to query actor %s: %s' % (n, e)))) cmd.finish('') def loadWords(self, cmd, finish=True): words = list(cmd.argDict.keys())[1:] if len(words) == 0: words = None Misc.log('hubCmd', 'loadWords loading %s' % (words)) try: hub.loadWords(words) except Exception as e: Misc.tback('hub.loadWords', e) cmd.fail('text=%s' % (Misc.qstr(e))) return if finish: cmd.finish() def getKeys(self, cmd): words = cmd.cmd.split() if len(words) < 3: cmd.fail('text="usage: getKeys srcName key1 [key2 ... keyN]"') return src = words[1] keys = words[2:] matched, unmatched = g.KVs.getValues(src, keys) Misc.log('hub.getKeys', 'matched=%s unmatched=%s' % (matched, unmatched)) for k, v in matched.items(): kvString = kvAsASCII(k, v) cmd.inform(kvString, src='hub.%s' % (src)) if unmatched: cmd.warn('text=%s' % (Misc.qstr('unmatched %s keys: %s' % (src, ', '.join(unmatched))))) cmd.finish('') def reallyReallyRestart(self, cmd): cmd.warn('text=%s' % (Misc.qstr('Restarting the hub now... bye, bye, and please call back soon!'))) g.poller.callMeIn(hub.restart, 1.0) def relog(self, cmd): args = cmd.cmd.split() args = args[1:] if len(args) != 1: cmd.fail('cmdError="usage: relog filename"') return filename = args[0] import os f = open(filename, 'a', 1) os.dup2(f.fileno(), 1) os.dup2(f.fileno(), 2) sys.stdout = os.fdopen(1, 'w', 1) sys.stderr = os.fdopen(2, 'w', 1) f.close() cmd.finish('text="Jeebus, you done it now, whatever it was"')
true
true
f7247748c768db7db41eded5f66dc4d97b47480c
964
py
Python
mats/model/components/gmm2d.py
StanfordASL/MATS
b31a86eb56728fc6025c71c7202ab425b078e3e5
[ "MIT" ]
21
2020-12-03T05:27:19.000Z
2022-01-18T02:24:22.000Z
mats/model/components/gmm2d.py
StanfordASL/MATS
b31a86eb56728fc6025c71c7202ab425b078e3e5
[ "MIT" ]
1
2022-03-29T14:51:51.000Z
2022-03-29T14:51:51.000Z
mats/model/components/gmm2d.py
StanfordASL/MATS
b31a86eb56728fc6025c71c7202ab425b078e3e5
[ "MIT" ]
5
2021-01-09T18:12:47.000Z
2022-03-22T11:45:56.000Z
import torch
import torch.distributions as td


class GMM2D(td.MixtureSameFamily):
    def __init__(self, mixture_distribution, component_distribution):
        super(GMM2D, self).__init__(mixture_distribution, component_distribution)

    def mode_mode(self):
        mode_k = torch.argmax(self.mixture_distribution.probs[0, 0]).item()
        mode_gaussian = self.component_distribution.mean[:, 0, mode_k, :2]
        return mode_gaussian

    def position_log_prob(self, x):
        # Computing the log probability over only the positions.
        component_dist = td.MultivariateNormal(loc=self.component_distribution.mean[..., :2],
                                               scale_tril=self.component_distribution.scale_tril[..., :2, :2])
        position_dist = td.MixtureSameFamily(self.mixture_distribution, component_dist)
        return position_dist.log_prob(x)

    @property
    def pis(self):
        return self.mixture_distribution.probs[0, 0]
40.166667
110
0.691909
import torch
import torch.distributions as td


class GMM2D(td.MixtureSameFamily):
    def __init__(self, mixture_distribution, component_distribution):
        super(GMM2D, self).__init__(mixture_distribution, component_distribution)

    def mode_mode(self):
        mode_k = torch.argmax(self.mixture_distribution.probs[0, 0]).item()
        mode_gaussian = self.component_distribution.mean[:, 0, mode_k, :2]
        return mode_gaussian

    def position_log_prob(self, x):
        component_dist = td.MultivariateNormal(loc=self.component_distribution.mean[..., :2],
                                               scale_tril=self.component_distribution.scale_tril[..., :2, :2])
        position_dist = td.MixtureSameFamily(self.mixture_distribution, component_dist)
        return position_dist.log_prob(x)

    @property
    def pis(self):
        return self.mixture_distribution.probs[0, 0]
true
true
f72477b35e362dbce3d14f182fe8455bd0352f5f
3,916
py
Python
ironic/objects/base.py
hpproliant/ironic
4f62cd97196b2a0068700ffb17456912147778d0
[ "Apache-2.0" ]
null
null
null
ironic/objects/base.py
hpproliant/ironic
4f62cd97196b2a0068700ffb17456912147778d0
[ "Apache-2.0" ]
null
null
null
ironic/objects/base.py
hpproliant/ironic
4f62cd97196b2a0068700ffb17456912147778d0
[ "Apache-2.0" ]
null
null
null
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Ironic common internal object model"""

from oslo_versionedobjects import base as object_base

from ironic.objects import fields as object_fields


class IronicObjectRegistry(object_base.VersionedObjectRegistry):
    pass


class IronicObject(object_base.VersionedObject):
    """Base class and object factory.

    This forms the base of all objects that can be remoted or instantiated
    via RPC. Simply defining a class that inherits from this base class
    will make it remotely instantiatable. Objects should implement the
    necessary "get" classmethod routines as well as "save" object methods
    as appropriate.
    """

    OBJ_SERIAL_NAMESPACE = 'ironic_object'
    OBJ_PROJECT_NAMESPACE = 'ironic'

    # TODO(lintan) Refactor these fields and create PersistentObject and
    # TimeStampObject like Nova when it is necessary.
    fields = {
        'created_at': object_fields.DateTimeField(nullable=True),
        'updated_at': object_fields.DateTimeField(nullable=True),
    }

    def as_dict(self):
        return dict((k, getattr(self, k))
                    for k in self.fields
                    if hasattr(self, k))

    def obj_refresh(self, loaded_object):
        """Applies updates for objects that inherit from base.IronicObject.

        Checks for updated attributes in an object. Updates are applied from
        the loaded object column by column in comparison with the current
        object.
        """
        for field in self.fields:
            if (self.obj_attr_is_set(field) and
                    self[field] != loaded_object[field]):
                self[field] = loaded_object[field]


class IronicObjectIndirectionAPI(object_base.VersionedObjectIndirectionAPI):
    def __init__(self):
        super(IronicObjectIndirectionAPI, self).__init__()
        # FIXME(xek): importing here due to a cyclical import error
        from ironic.conductor import rpcapi as conductor_api
        self._conductor = conductor_api.ConductorAPI()

    def object_action(self, context, objinst, objmethod, args, kwargs):
        return self._conductor.object_action(context, objinst, objmethod,
                                              args, kwargs)

    def object_class_action(self, context, objname, objmethod, objver,
                            args, kwargs):
        # NOTE(xek): This method is implemented for compatibility with
        # oslo.versionedobjects 0.10.0 and older. It will be replaced by
        # object_class_action_versions.
        versions = object_base.obj_tree_get_versions(objname)
        return self.object_class_action_versions(
            context, objname, objmethod, versions, args, kwargs)

    def object_class_action_versions(self, context, objname, objmethod,
                                     object_versions, args, kwargs):
        return self._conductor.object_class_action_versions(
            context, objname, objmethod, object_versions, args, kwargs)

    def object_backport_versions(self, context, objinst, object_versions):
        return self._conductor.object_backport_versions(context, objinst,
                                                        object_versions)


class IronicObjectSerializer(object_base.VersionedObjectSerializer):
    # Base class to use for object hydration
    OBJ_BASE_CLASS = IronicObject
40.371134
78
0.687692
from oslo_versionedobjects import base as object_base

from ironic.objects import fields as object_fields


class IronicObjectRegistry(object_base.VersionedObjectRegistry):
    pass


class IronicObject(object_base.VersionedObject):

    OBJ_SERIAL_NAMESPACE = 'ironic_object'
    OBJ_PROJECT_NAMESPACE = 'ironic'

    fields = {
        'created_at': object_fields.DateTimeField(nullable=True),
        'updated_at': object_fields.DateTimeField(nullable=True),
    }

    def as_dict(self):
        return dict((k, getattr(self, k))
                    for k in self.fields
                    if hasattr(self, k))

    def obj_refresh(self, loaded_object):
        for field in self.fields:
            if (self.obj_attr_is_set(field) and
                    self[field] != loaded_object[field]):
                self[field] = loaded_object[field]


class IronicObjectIndirectionAPI(object_base.VersionedObjectIndirectionAPI):
    def __init__(self):
        super(IronicObjectIndirectionAPI, self).__init__()
        from ironic.conductor import rpcapi as conductor_api
        self._conductor = conductor_api.ConductorAPI()

    def object_action(self, context, objinst, objmethod, args, kwargs):
        return self._conductor.object_action(context, objinst, objmethod,
                                              args, kwargs)

    def object_class_action(self, context, objname, objmethod, objver,
                            args, kwargs):
        versions = object_base.obj_tree_get_versions(objname)
        return self.object_class_action_versions(
            context, objname, objmethod, versions, args, kwargs)

    def object_class_action_versions(self, context, objname, objmethod,
                                     object_versions, args, kwargs):
        return self._conductor.object_class_action_versions(
            context, objname, objmethod, object_versions, args, kwargs)

    def object_backport_versions(self, context, objinst, object_versions):
        return self._conductor.object_backport_versions(context, objinst,
                                                        object_versions)


class IronicObjectSerializer(object_base.VersionedObjectSerializer):
    OBJ_BASE_CLASS = IronicObject
true
true
f7247810b61f545dbbf06766dccc172c15e03ef6
62,418
py
Python
superset/db_engine_specs.py
emacip/incubator-superset
594cd7096070a742209851ff9112d5bf4d16a7be
[ "Apache-2.0", "CC-BY-4.0", "MIT" ]
1
2019-02-05T04:53:37.000Z
2019-02-05T04:53:37.000Z
superset/db_engine_specs.py
mankoven/incubator-superset
bab7ee7ecf222250287e591d91b38be583c9a2f3
[ "Apache-2.0", "CC-BY-4.0", "MIT" ]
7
2021-02-02T23:08:19.000Z
2022-03-29T22:28:16.000Z
superset/db_engine_specs.py
g4brielvs/incubator-superset
83ee9178328c5193808fe356ceb3090a299477f6
[ "Apache-2.0", "CC-BY-4.0", "MIT" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=C,R,W """Compatibility layer for different database engines This modules stores logic specific to different database engines. Things like time-related functions that are similar but not identical, or information as to expose certain features or not and how to expose them. For instance, Hive/Presto supports partitions and have a specific API to list partitions. Other databases like Vertica also support partitions but have different API to get to them. Other databases don't support partitions at all. The classes here will use a common interface to specify all this. The general idea is to use static classes and an inheritance scheme. """ from collections import namedtuple import hashlib import inspect import logging import os import re import textwrap import time from flask import g from flask_babel import lazy_gettext as _ import pandas import sqlalchemy as sqla from sqlalchemy import Column, select from sqlalchemy.engine import create_engine from sqlalchemy.engine.url import make_url from sqlalchemy.sql import quoted_name, text from sqlalchemy.sql.expression import TextAsFrom import sqlparse from werkzeug.utils import secure_filename from superset import app, conf, db, sql_parse from superset.exceptions import SupersetTemplateException from superset.utils import core as utils QueryStatus = utils.QueryStatus config = app.config tracking_url_trans = conf.get('TRACKING_URL_TRANSFORMER') hive_poll_interval = conf.get('HIVE_POLL_INTERVAL') Grain = namedtuple('Grain', 'name label function duration') builtin_time_grains = { None: 'Time Column', 'PT1S': 'second', 'PT1M': 'minute', 'PT5M': '5 minute', 'PT10M': '10 minute', 'PT15M': '15 minute', 'PT0.5H': 'half hour', 'PT1H': 'hour', 'P1D': 'day', 'P1W': 'week', 'P1M': 'month', 'P0.25Y': 'quarter', 'P1Y': 'year', '1969-12-28T00:00:00Z/P1W': 'week_start_sunday', '1969-12-29T00:00:00Z/P1W': 'week_start_monday', 'P1W/1970-01-03T00:00:00Z': 'week_ending_saturday', 'P1W/1970-01-04T00:00:00Z': 'week_ending_sunday', } def _create_time_grains_tuple(time_grains, time_grain_functions, blacklist): ret_list = [] blacklist = blacklist if blacklist else [] for duration, func in time_grain_functions.items(): if duration not in blacklist: name = time_grains.get(duration) ret_list.append(Grain(name, _(name), func, duration)) return tuple(ret_list) class LimitMethod(object): """Enum the ways that limits can be applied""" FETCH_MANY = 'fetch_many' WRAP_SQL = 'wrap_sql' FORCE_LIMIT = 'force_limit' class BaseEngineSpec(object): """Abstract class for database engine specific configurations""" engine = 'base' # str as defined in sqlalchemy.engine.engine time_grain_functions = {} time_groupby_inline = False limit_method = LimitMethod.FORCE_LIMIT time_secondary_columns = False inner_joins = 
True allows_subquery = True force_column_alias_quotes = False arraysize = None @classmethod def get_time_grains(cls): blacklist = config.get('TIME_GRAIN_BLACKLIST', []) grains = builtin_time_grains.copy() grains.update(config.get('TIME_GRAIN_ADDONS', {})) grain_functions = cls.time_grain_functions.copy() grain_addon_functions = config.get('TIME_GRAIN_ADDON_FUNCTIONS', {}) grain_functions.update(grain_addon_functions.get(cls.engine, {})) return _create_time_grains_tuple(grains, grain_functions, blacklist) @classmethod def fetch_data(cls, cursor, limit): if cls.arraysize: cursor.arraysize = cls.arraysize if cls.limit_method == LimitMethod.FETCH_MANY: return cursor.fetchmany(limit) return cursor.fetchall() @classmethod def epoch_to_dttm(cls): raise NotImplementedError() @classmethod def epoch_ms_to_dttm(cls): return cls.epoch_to_dttm().replace('{col}', '({col}/1000.000)') @classmethod def get_datatype(cls, type_code): if isinstance(type_code, str) and len(type_code): return type_code.upper() @classmethod def extra_table_metadata(cls, database, table_name, schema_name): """Returns engine-specific table metadata""" return {} @classmethod def apply_limit_to_sql(cls, sql, limit, database): """Alters the SQL statement to apply a LIMIT clause""" if cls.limit_method == LimitMethod.WRAP_SQL: sql = sql.strip('\t\n ;') qry = ( select('*') .select_from( TextAsFrom(text(sql), ['*']).alias('inner_qry'), ) .limit(limit) ) return database.compile_sqla_query(qry) elif LimitMethod.FORCE_LIMIT: parsed_query = sql_parse.ParsedQuery(sql) sql = parsed_query.get_query_with_new_limit(limit) return sql @classmethod def get_limit_from_sql(cls, sql): parsed_query = sql_parse.ParsedQuery(sql) return parsed_query.limit @classmethod def get_query_with_new_limit(cls, sql, limit): parsed_query = sql_parse.ParsedQuery(sql) return parsed_query.get_query_with_new_limit(limit) @staticmethod def csv_to_df(**kwargs): kwargs['filepath_or_buffer'] = \ config['UPLOAD_FOLDER'] + kwargs['filepath_or_buffer'] kwargs['encoding'] = 'utf-8' kwargs['iterator'] = True chunks = pandas.read_csv(**kwargs) df = pandas.DataFrame() df = pandas.concat(chunk for chunk in chunks) return df @staticmethod def df_to_db(df, table, **kwargs): df.to_sql(**kwargs) table.user_id = g.user.id table.schema = kwargs['schema'] table.fetch_metadata() db.session.add(table) db.session.commit() @staticmethod def create_table_from_csv(form, table): def _allowed_file(filename): # Only allow specific file extensions as specified in the config extension = os.path.splitext(filename)[1] return extension and extension[1:] in config['ALLOWED_EXTENSIONS'] filename = secure_filename(form.csv_file.data.filename) if not _allowed_file(filename): raise Exception('Invalid file type selected') kwargs = { 'filepath_or_buffer': filename, 'sep': form.sep.data, 'header': form.header.data if form.header.data else 0, 'index_col': form.index_col.data, 'mangle_dupe_cols': form.mangle_dupe_cols.data, 'skipinitialspace': form.skipinitialspace.data, 'skiprows': form.skiprows.data, 'nrows': form.nrows.data, 'skip_blank_lines': form.skip_blank_lines.data, 'parse_dates': form.parse_dates.data, 'infer_datetime_format': form.infer_datetime_format.data, 'chunksize': 10000, } df = BaseEngineSpec.csv_to_df(**kwargs) df_to_db_kwargs = { 'table': table, 'df': df, 'name': form.name.data, 'con': create_engine(form.con.data.sqlalchemy_uri_decrypted, echo=False), 'schema': form.schema.data, 'if_exists': form.if_exists.data, 'index': form.index.data, 'index_label': form.index_label.data, 'chunksize': 
10000, } BaseEngineSpec.df_to_db(**df_to_db_kwargs) @classmethod def convert_dttm(cls, target_type, dttm): return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod def fetch_result_sets(cls, db, datasource_type): """Returns a list of tables [schema1.table1, schema2.table2, ...] Datasource_type can be 'table' or 'view'. Empty schema corresponds to the list of full names of the all tables or views: <schema>.<result_set_name>. """ schemas = db.all_schema_names(cache=db.schema_cache_enabled, cache_timeout=db.schema_cache_timeout, force=True) all_result_sets = [] for schema in schemas: if datasource_type == 'table': all_datasource_names = db.all_table_names_in_schema( schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout) elif datasource_type == 'view': all_datasource_names = db.all_view_names_in_schema( schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout) all_result_sets += [ '{}.{}'.format(schema, t) for t in all_datasource_names] return all_result_sets @classmethod def handle_cursor(cls, cursor, query, session): """Handle a live cursor between the execute and fetchall calls The flow works without this method doing anything, but it allows for handling the cursor and updating progress information in the query object""" pass @classmethod def extract_error_message(cls, e): """Extract error message for queries""" return utils.error_msg_from_exception(e) @classmethod def adjust_database_uri(cls, uri, selected_schema): """Based on a URI and selected schema, return a new URI The URI here represents the URI as entered when saving the database, ``selected_schema`` is the schema currently active presumably in the SQL Lab dropdown. Based on that, for some database engine, we can return a new altered URI that connects straight to the active schema, meaning the users won't have to prefix the object names by the schema name. Some databases engines have 2 level of namespacing: database and schema (postgres, oracle, mssql, ...) For those it's probably better to not alter the database component of the URI with the schema name, it won't work. Some database drivers like presto accept '{catalog}/{schema}' in the database component of the URL, that can be handled here. """ return uri @classmethod def patch(cls): pass @classmethod def get_schema_names(cls, inspector): return sorted(inspector.get_schema_names()) @classmethod def get_table_names(cls, inspector, schema): return sorted(inspector.get_table_names(schema)) @classmethod def get_view_names(cls, inspector, schema): return sorted(inspector.get_view_names(schema)) @classmethod def where_latest_partition( cls, table_name, schema, database, qry, columns=None): return False @classmethod def _get_fields(cls, cols): return [sqla.column(c.get('name')) for c in cols] @classmethod def select_star(cls, my_db, table_name, engine, schema=None, limit=100, show_cols=False, indent=True, latest_partition=True, cols=None): fields = '*' cols = cols or [] if (show_cols or latest_partition) and not cols: cols = my_db.get_columns(table_name, schema) if show_cols: fields = cls._get_fields(cols) quote = engine.dialect.identifier_preparer.quote if schema: full_table_name = quote(schema) + '.' 
+ quote(table_name) else: full_table_name = quote(table_name) qry = select(fields).select_from(text(full_table_name)) if limit: qry = qry.limit(limit) if latest_partition: partition_query = cls.where_latest_partition( table_name, schema, my_db, qry, columns=cols) if partition_query != False: # noqa qry = partition_query sql = my_db.compile_sqla_query(qry) if indent: sql = sqlparse.format(sql, reindent=True) return sql @classmethod def modify_url_for_impersonation(cls, url, impersonate_user, username): """ Modify the SQL Alchemy URL object with the user to impersonate if applicable. :param url: SQLAlchemy URL object :param impersonate_user: Bool indicating if impersonation is enabled :param username: Effective username """ if impersonate_user is not None and username is not None: url.username = username @classmethod def get_configuration_for_impersonation(cls, uri, impersonate_user, username): """ Return a configuration dictionary that can be merged with other configs that can set the correct properties for impersonating users :param uri: URI string :param impersonate_user: Bool indicating if impersonation is enabled :param username: Effective username :return: Dictionary with configs required for impersonation """ return {} @classmethod def execute(cls, cursor, query, **kwargs): if cls.arraysize: cursor.arraysize = cls.arraysize cursor.execute(query) @classmethod def make_label_compatible(cls, label): """ Conditionally mutate and/or quote a sql column/expression label. If force_column_alias_quotes is set to True, return the label as a sqlalchemy.sql.elements.quoted_name object to ensure that the select query and query results have same case. Otherwise return the mutated label as a regular string. """ label = cls.mutate_label(label) return quoted_name(label, True) if cls.force_column_alias_quotes else label @staticmethod def mutate_label(label): """ Most engines support mixed case aliases that can include numbers and special characters, like commas, parentheses etc. For engines that have restrictions on what types of aliases are supported, this method can be overridden to ensure that labels conform to the engine's limitations. Mutated labels should be deterministic (input label A always yields output label X) and unique (input labels A and B don't yield the same output label X). 
""" return label class PostgresBaseEngineSpec(BaseEngineSpec): """ Abstract class for Postgres 'like' databases """ engine = '' time_grain_functions = { None: '{col}', 'PT1S': "DATE_TRUNC('second', {col}) AT TIME ZONE 'UTC'", 'PT1M': "DATE_TRUNC('minute', {col}) AT TIME ZONE 'UTC'", 'PT1H': "DATE_TRUNC('hour', {col}) AT TIME ZONE 'UTC'", 'P1D': "DATE_TRUNC('day', {col}) AT TIME ZONE 'UTC'", 'P1W': "DATE_TRUNC('week', {col}) AT TIME ZONE 'UTC'", 'P1M': "DATE_TRUNC('month', {col}) AT TIME ZONE 'UTC'", 'P0.25Y': "DATE_TRUNC('quarter', {col}) AT TIME ZONE 'UTC'", 'P1Y': "DATE_TRUNC('year', {col}) AT TIME ZONE 'UTC'", } @classmethod def fetch_data(cls, cursor, limit): if not cursor.description: return [] if cls.limit_method == LimitMethod.FETCH_MANY: return cursor.fetchmany(limit) return cursor.fetchall() @classmethod def epoch_to_dttm(cls): return "(timestamp 'epoch' + {col} * interval '1 second')" @classmethod def convert_dttm(cls, target_type, dttm): return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) class PostgresEngineSpec(PostgresBaseEngineSpec): engine = 'postgresql' @classmethod def get_table_names(cls, inspector, schema): """Need to consider foreign tables for PostgreSQL""" tables = inspector.get_table_names(schema) tables.extend(inspector.get_foreign_table_names(schema)) return sorted(tables) class SnowflakeEngineSpec(PostgresBaseEngineSpec): engine = 'snowflake' force_column_alias_quotes = True time_grain_functions = { None: '{col}', 'PT1S': "DATE_TRUNC('SECOND', {col})", 'PT1M': "DATE_TRUNC('MINUTE', {col})", 'PT5M': "DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 5) * 5, \ DATE_TRUNC('HOUR', {col}))", 'PT10M': "DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 10) * 10, \ DATE_TRUNC('HOUR', {col}))", 'PT15M': "DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 15) * 15, \ DATE_TRUNC('HOUR', {col}))", 'PT0.5H': "DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 30) * 30, \ DATE_TRUNC('HOUR', {col}))", 'PT1H': "DATE_TRUNC('HOUR', {col})", 'P1D': "DATE_TRUNC('DAY', {col})", 'P1W': "DATE_TRUNC('WEEK', {col})", 'P1M': "DATE_TRUNC('MONTH', {col})", 'P0.25Y': "DATE_TRUNC('QUARTER', {col})", 'P1Y': "DATE_TRUNC('YEAR', {col})", } @classmethod def adjust_database_uri(cls, uri, selected_schema=None): database = uri.database if '/' in uri.database: database = uri.database.split('/')[0] if selected_schema: uri.database = database + '/' + selected_schema return uri class VerticaEngineSpec(PostgresBaseEngineSpec): engine = 'vertica' class RedshiftEngineSpec(PostgresBaseEngineSpec): engine = 'redshift' @staticmethod def mutate_label(label): """ Redshift only supports lowercase column names and aliases. 
:param str label: Original label which might include uppercase letters :return: String that is supported by the database """ return label.lower() class OracleEngineSpec(PostgresBaseEngineSpec): engine = 'oracle' limit_method = LimitMethod.WRAP_SQL force_column_alias_quotes = True time_grain_functions = { None: '{col}', 'PT1S': 'CAST({col} as DATE)', 'PT1M': "TRUNC(CAST({col} as DATE), 'MI')", 'PT1H': "TRUNC(CAST({col} as DATE), 'HH')", 'P1D': "TRUNC(CAST({col} as DATE), 'DDD')", 'P1W': "TRUNC(CAST({col} as DATE), 'WW')", 'P1M': "TRUNC(CAST({col} as DATE), 'MONTH')", 'P0.25Y': "TRUNC(CAST({col} as DATE), 'Q')", 'P1Y': "TRUNC(CAST({col} as DATE), 'YEAR')", } @classmethod def convert_dttm(cls, target_type, dttm): return ( """TO_TIMESTAMP('{}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')""" ).format(dttm.isoformat()) @staticmethod def mutate_label(label): """ Oracle 12.1 and earlier support a maximum of 30 byte length object names, which usually means 30 characters. :param str label: Original label which might include unsupported characters :return: String that is supported by the database """ if len(label) > 30: hashed_label = hashlib.md5(label.encode('utf-8')).hexdigest() # truncate the hash to first 30 characters return hashed_label[:30] return label class Db2EngineSpec(BaseEngineSpec): engine = 'ibm_db_sa' limit_method = LimitMethod.WRAP_SQL force_column_alias_quotes = True time_grain_functions = { None: '{col}', 'PT1S': 'CAST({col} as TIMESTAMP)' ' - MICROSECOND({col}) MICROSECONDS', 'PT1M': 'CAST({col} as TIMESTAMP)' ' - SECOND({col}) SECONDS' ' - MICROSECOND({col}) MICROSECONDS', 'PT1H': 'CAST({col} as TIMESTAMP)' ' - MINUTE({col}) MINUTES' ' - SECOND({col}) SECONDS' ' - MICROSECOND({col}) MICROSECONDS ', 'P1D': 'CAST({col} as TIMESTAMP)' ' - HOUR({col}) HOURS' ' - MINUTE({col}) MINUTES' ' - SECOND({col}) SECONDS' ' - MICROSECOND({col}) MICROSECONDS', 'P1W': '{col} - (DAYOFWEEK({col})) DAYS', 'P1M': '{col} - (DAY({col})-1) DAYS', 'P0.25Y': '{col} - (DAY({col})-1) DAYS' ' - (MONTH({col})-1) MONTHS' ' + ((QUARTER({col})-1) * 3) MONTHS', 'P1Y': '{col} - (DAY({col})-1) DAYS' ' - (MONTH({col})-1) MONTHS', } @classmethod def epoch_to_dttm(cls): return "(TIMESTAMP('1970-01-01', '00:00:00') + {col} SECONDS)" @classmethod def convert_dttm(cls, target_type, dttm): return "'{}'".format(dttm.strftime('%Y-%m-%d-%H.%M.%S')) @staticmethod def mutate_label(label): """ Db2 for z/OS supports a maximum of 30 byte length object names, which usually means 30 characters. 
:param str label: Original label which might include unsupported characters :return: String that is supported by the database """ if len(label) > 30: hashed_label = hashlib.md5(label.encode('utf-8')).hexdigest() # truncate the hash to first 30 characters return hashed_label[:30] return label class SqliteEngineSpec(BaseEngineSpec): engine = 'sqlite' time_grain_functions = { None: '{col}', 'PT1H': "DATETIME(STRFTIME('%Y-%m-%dT%H:00:00', {col}))", 'P1D': 'DATE({col})', 'P1W': "DATE({col}, -strftime('%W', {col}) || ' days')", 'P1M': "DATE({col}, -strftime('%d', {col}) || ' days', '+1 day')", 'P1Y': "DATETIME(STRFTIME('%Y-01-01T00:00:00', {col}))", 'P1W/1970-01-03T00:00:00Z': "DATE({col}, 'weekday 6')", '1969-12-28T00:00:00Z/P1W': "DATE({col}, 'weekday 0', '-7 days')", } @classmethod def epoch_to_dttm(cls): return "datetime({col}, 'unixepoch')" @classmethod def fetch_result_sets(cls, db, datasource_type): schemas = db.all_schema_names(cache=db.schema_cache_enabled, cache_timeout=db.schema_cache_timeout, force=True) all_result_sets = [] schema = schemas[0] if datasource_type == 'table': all_datasource_names = db.all_table_names_in_schema( schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout) elif datasource_type == 'view': all_datasource_names = db.all_view_names_in_schema( schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout) all_result_sets += [ '{}.{}'.format(schema, t) for t in all_datasource_names] return all_result_sets @classmethod def convert_dttm(cls, target_type, dttm): iso = dttm.isoformat().replace('T', ' ') if '.' not in iso: iso += '.000000' return "'{}'".format(iso) @classmethod def get_table_names(cls, inspector, schema): """Need to disregard the schema for Sqlite""" return sorted(inspector.get_table_names()) class MySQLEngineSpec(BaseEngineSpec): engine = 'mysql' time_grain_functions = { None: '{col}', 'PT1S': 'DATE_ADD(DATE({col}), ' 'INTERVAL (HOUR({col})*60*60 + MINUTE({col})*60' ' + SECOND({col})) SECOND)', 'PT1M': 'DATE_ADD(DATE({col}), ' 'INTERVAL (HOUR({col})*60 + MINUTE({col})) MINUTE)', 'PT1H': 'DATE_ADD(DATE({col}), ' 'INTERVAL HOUR({col}) HOUR)', 'P1D': 'DATE({col})', 'P1W': 'DATE(DATE_SUB({col}, ' 'INTERVAL DAYOFWEEK({col}) - 1 DAY))', 'P1M': 'DATE(DATE_SUB({col}, ' 'INTERVAL DAYOFMONTH({col}) - 1 DAY))', 'P0.25Y': 'MAKEDATE(YEAR({col}), 1) ' '+ INTERVAL QUARTER({col}) QUARTER - INTERVAL 1 QUARTER', 'P1Y': 'DATE(DATE_SUB({col}, ' 'INTERVAL DAYOFYEAR({col}) - 1 DAY))', '1969-12-29T00:00:00Z/P1W': 'DATE(DATE_SUB({col}, ' 'INTERVAL DAYOFWEEK(DATE_SUB({col}, INTERVAL 1 DAY)) - 1 DAY))', } type_code_map = {} # loaded from get_datatype only if needed @classmethod def convert_dttm(cls, target_type, dttm): if target_type.upper() in ('DATETIME', 'DATE'): return "STR_TO_DATE('{}', '%Y-%m-%d %H:%i:%s')".format( dttm.strftime('%Y-%m-%d %H:%M:%S')) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod def adjust_database_uri(cls, uri, selected_schema=None): if selected_schema: uri.database = selected_schema return uri @classmethod def get_datatype(cls, type_code): if not cls.type_code_map: # only import and store if needed at least once import MySQLdb ft = MySQLdb.constants.FIELD_TYPE cls.type_code_map = { getattr(ft, k): k for k in dir(ft) if not k.startswith('_') } datatype = type_code if isinstance(type_code, int): datatype = cls.type_code_map.get(type_code) if datatype and isinstance(datatype, str) and len(datatype): return datatype @classmethod def epoch_to_dttm(cls): return 
'from_unixtime({col})' @classmethod def extract_error_message(cls, e): """Extract error message for queries""" message = str(e) try: if isinstance(e.args, tuple) and len(e.args) > 1: message = e.args[1] except Exception: pass return message class PrestoEngineSpec(BaseEngineSpec): engine = 'presto' time_grain_functions = { None: '{col}', 'PT1S': "date_trunc('second', CAST({col} AS TIMESTAMP))", 'PT1M': "date_trunc('minute', CAST({col} AS TIMESTAMP))", 'PT1H': "date_trunc('hour', CAST({col} AS TIMESTAMP))", 'P1D': "date_trunc('day', CAST({col} AS TIMESTAMP))", 'P1W': "date_trunc('week', CAST({col} AS TIMESTAMP))", 'P1M': "date_trunc('month', CAST({col} AS TIMESTAMP))", 'P0.25Y': "date_trunc('quarter', CAST({col} AS TIMESTAMP))", 'P1Y': "date_trunc('year', CAST({col} AS TIMESTAMP))", 'P1W/1970-01-03T00:00:00Z': "date_add('day', 5, date_trunc('week', date_add('day', 1, \ CAST({col} AS TIMESTAMP))))", '1969-12-28T00:00:00Z/P1W': "date_add('day', -1, date_trunc('week', \ date_add('day', 1, CAST({col} AS TIMESTAMP))))", } @classmethod def get_view_names(cls, inspector, schema): """Returns an empty list get_table_names() function returns all table names and view names, and get_view_names() is not implemented in sqlalchemy_presto.py https://github.com/dropbox/PyHive/blob/e25fc8440a0686bbb7a5db5de7cb1a77bdb4167a/pyhive/sqlalchemy_presto.py """ return [] @classmethod def adjust_database_uri(cls, uri, selected_schema=None): database = uri.database if selected_schema and database: if '/' in database: database = database.split('/')[0] + '/' + selected_schema else: database += '/' + selected_schema uri.database = database return uri @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "from_iso8601_date('{}')".format(dttm.isoformat()[:10]) if tt == 'TIMESTAMP': return "from_iso8601_timestamp('{}')".format(dttm.isoformat()) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod def epoch_to_dttm(cls): return 'from_unixtime({col})' @classmethod def fetch_result_sets(cls, db, datasource_type): """Returns a list of tables [schema1.table1, schema2.table2, ...] Datasource_type can be 'table' or 'view'. Empty schema corresponds to the list of full names of the all tables or views: <schema>.<result_set_name>. """ result_set_df = db.get_df( """SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S ORDER BY concat(table_schema, '.', table_name)""".format( datasource_type.upper(), ), None) result_sets = [] for unused, row in result_set_df.iterrows(): result_sets.append('{}.{}'.format( row['table_schema'], row['table_name'])) return result_sets @classmethod def extra_table_metadata(cls, database, table_name, schema_name): indexes = database.get_indexes(table_name, schema_name) if not indexes: return {} cols = indexes[0].get('column_names', []) full_table_name = table_name if schema_name and '.' 
not in table_name: full_table_name = '{}.{}'.format(schema_name, table_name) pql = cls._partition_query(full_table_name) col_name, latest_part = cls.latest_partition( table_name, schema_name, database, show_first=True) return { 'partitions': { 'cols': cols, 'latest': {col_name: latest_part}, 'partitionQuery': pql, }, } @classmethod def handle_cursor(cls, cursor, query, session): """Updates progress information""" logging.info('Polling the cursor for progress') polled = cursor.poll() # poll returns dict -- JSON status information or ``None`` # if the query is done # https://github.com/dropbox/PyHive/blob/ # b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178 while polled: # Update the object and wait for the kill signal. stats = polled.get('stats', {}) query = session.query(type(query)).filter_by(id=query.id).one() if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]: cursor.cancel() break if stats: state = stats.get('state') # if already finished, then stop polling if state == 'FINISHED': break completed_splits = float(stats.get('completedSplits')) total_splits = float(stats.get('totalSplits')) if total_splits and completed_splits: progress = 100 * (completed_splits / total_splits) logging.info( 'Query progress: {} / {} ' 'splits'.format(completed_splits, total_splits)) if progress > query.progress: query.progress = progress session.commit() time.sleep(1) logging.info('Polling the cursor for progress') polled = cursor.poll() @classmethod def extract_error_message(cls, e): if ( hasattr(e, 'orig') and type(e.orig).__name__ == 'DatabaseError' and isinstance(e.orig[0], dict)): error_dict = e.orig[0] return '{} at {}: {}'.format( error_dict.get('errorName'), error_dict.get('errorLocation'), error_dict.get('message'), ) if ( type(e).__name__ == 'DatabaseError' and hasattr(e, 'args') and len(e.args) > 0 ): error_dict = e.args[0] return error_dict.get('message') return utils.error_msg_from_exception(e) @classmethod def _partition_query( cls, table_name, limit=0, order_by=None, filters=None): """Returns a partition query :param table_name: the name of the table to get partitions from :type table_name: str :param limit: the number of partitions to be returned :type limit: int :param order_by: a list of tuples of field name and a boolean that determines if that field should be sorted in descending order :type order_by: list of (str, bool) tuples :param filters: a list of filters to apply :param filters: dict of field name and filter value combinations """ limit_clause = 'LIMIT {}'.format(limit) if limit else '' order_by_clause = '' if order_by: l = [] # noqa: E741 for field, desc in order_by: l.append(field + ' DESC' if desc else '') order_by_clause = 'ORDER BY ' + ', '.join(l) where_clause = '' if filters: l = [] # noqa: E741 for field, value in filters.items(): l.append(f"{field} = '{value}'") where_clause = 'WHERE ' + ' AND '.join(l) sql = textwrap.dedent(f"""\ SHOW PARTITIONS FROM {table_name} {where_clause} {order_by_clause} {limit_clause} """) return sql @classmethod def where_latest_partition( cls, table_name, schema, database, qry, columns=None): try: col_name, value = cls.latest_partition( table_name, schema, database, show_first=True) except Exception: # table is not partitioned return False for c in columns: if c.get('name') == col_name: return qry.where(Column(col_name) == value) return False @classmethod def _latest_partition_from_df(cls, df): recs = df.to_records(index=False) if recs: return recs[0][0] @classmethod def latest_partition(cls, table_name, schema, 
database, show_first=False): """Returns col name and the latest (max) partition value for a table :param table_name: the name of the table :type table_name: str :param schema: schema / database / namespace :type schema: str :param database: database query will be run against :type database: models.Database :param show_first: displays the value for the first partitioning key if there are many partitioning keys :type show_first: bool >>> latest_partition('foo_table') '2018-01-01' """ indexes = database.get_indexes(table_name, schema) if len(indexes[0]['column_names']) < 1: raise SupersetTemplateException( 'The table should have one partitioned field') elif not show_first and len(indexes[0]['column_names']) > 1: raise SupersetTemplateException( 'The table should have a single partitioned field ' 'to use this function. You may want to use ' '`presto.latest_sub_partition`') part_field = indexes[0]['column_names'][0] sql = cls._partition_query(table_name, 1, [(part_field, True)]) df = database.get_df(sql, schema) return part_field, cls._latest_partition_from_df(df) @classmethod def latest_sub_partition(cls, table_name, schema, database, **kwargs): """Returns the latest (max) partition value for a table A filtering criteria should be passed for all fields that are partitioned except for the field to be returned. For example, if a table is partitioned by (``ds``, ``event_type`` and ``event_category``) and you want the latest ``ds``, you'll want to provide a filter as keyword arguments for both ``event_type`` and ``event_category`` as in ``latest_sub_partition('my_table', event_category='page', event_type='click')`` :param table_name: the name of the table, can be just the table name or a fully qualified table name as ``schema_name.table_name`` :type table_name: str :param schema: schema / database / namespace :type schema: str :param database: database query will be run against :type database: models.Database :param kwargs: keyword arguments define the filtering criteria on the partition list. There can be many of these. :type kwargs: str >>> latest_sub_partition('sub_partition_table', event_type='click') '2018-01-01' """ indexes = database.get_indexes(table_name, schema) part_fields = indexes[0]['column_names'] for k in kwargs.keys(): if k not in k in part_fields: msg = 'Field [{k}] is not part of the portioning key' raise SupersetTemplateException(msg) if len(kwargs.keys()) != len(part_fields) - 1: msg = ( 'A filter needs to be specified for {} out of the ' '{} fields.' 
).format(len(part_fields) - 1, len(part_fields)) raise SupersetTemplateException(msg) for field in part_fields: if field not in kwargs.keys(): field_to_return = field sql = cls._partition_query( table_name, 1, [(field_to_return, True)], kwargs) df = database.get_df(sql, schema) if df.empty: return '' return df.to_dict()[field_to_return][0] class HiveEngineSpec(PrestoEngineSpec): """Reuses PrestoEngineSpec functionality.""" engine = 'hive' # Scoping regex at class level to avoid recompiling # 17/02/07 19:36:38 INFO ql.Driver: Total jobs = 5 jobs_stats_r = re.compile( r'.*INFO.*Total jobs = (?P<max_jobs>[0-9]+)') # 17/02/07 19:37:08 INFO ql.Driver: Launching Job 2 out of 5 launching_job_r = re.compile( '.*INFO.*Launching Job (?P<job_number>[0-9]+) out of ' '(?P<max_jobs>[0-9]+)') # 17/02/07 19:36:58 INFO exec.Task: 2017-02-07 19:36:58,152 Stage-18 # map = 0%, reduce = 0% stage_progress_r = re.compile( r'.*INFO.*Stage-(?P<stage_number>[0-9]+).*' r'map = (?P<map_progress>[0-9]+)%.*' r'reduce = (?P<reduce_progress>[0-9]+)%.*') @classmethod def patch(cls): from pyhive import hive # pylint: disable=no-name-in-module from superset.db_engines import hive as patched_hive from TCLIService import ( constants as patched_constants, ttypes as patched_ttypes, TCLIService as patched_TCLIService) hive.TCLIService = patched_TCLIService hive.constants = patched_constants hive.ttypes = patched_ttypes hive.Cursor.fetch_logs = patched_hive.fetch_logs @classmethod def fetch_result_sets(cls, db, datasource_type): return BaseEngineSpec.fetch_result_sets( db, datasource_type) @classmethod def fetch_data(cls, cursor, limit): import pyhive from TCLIService import ttypes state = cursor.poll() if state.operationState == ttypes.TOperationState.ERROR_STATE: raise Exception('Query error', state.errorMessage) try: return super(HiveEngineSpec, cls).fetch_data(cursor, limit) except pyhive.exc.ProgrammingError: return [] @staticmethod def create_table_from_csv(form, table): """Uploads a csv file and creates a superset datasource in Hive.""" def convert_to_hive_type(col_type): """maps tableschema's types to hive types""" tableschema_to_hive_types = { 'boolean': 'BOOLEAN', 'integer': 'INT', 'number': 'DOUBLE', 'string': 'STRING', } return tableschema_to_hive_types.get(col_type, 'STRING') bucket_path = config['CSV_TO_HIVE_UPLOAD_S3_BUCKET'] if not bucket_path: logging.info('No upload bucket specified') raise Exception( 'No upload bucket specified. You can specify one in the config file.') table_name = form.name.data schema_name = form.schema.data if config.get('UPLOADED_CSV_HIVE_NAMESPACE'): if '.' in table_name or schema_name: raise Exception( "You can't specify a namespace. " 'All tables will be uploaded to the `{}` namespace'.format( config.get('HIVE_NAMESPACE'))) full_table_name = '{}.{}'.format( config.get('UPLOADED_CSV_HIVE_NAMESPACE'), table_name) else: if '.' in table_name and schema_name: raise Exception( "You can't specify a namespace both in the name of the table " 'and in the schema field. 
Please remove one') full_table_name = '{}.{}'.format( schema_name, table_name) if schema_name else table_name filename = form.csv_file.data.filename upload_prefix = config['CSV_TO_HIVE_UPLOAD_DIRECTORY'] upload_path = config['UPLOAD_FOLDER'] + \ secure_filename(filename) # Optional dependency from tableschema import Table # pylint: disable=import-error hive_table_schema = Table(upload_path).infer() column_name_and_type = [] for column_info in hive_table_schema['fields']: column_name_and_type.append( '`{}` {}'.format( column_info['name'], convert_to_hive_type(column_info['type']))) schema_definition = ', '.join(column_name_and_type) # Optional dependency import boto3 # pylint: disable=import-error s3 = boto3.client('s3') location = os.path.join('s3a://', bucket_path, upload_prefix, table_name) s3.upload_file( upload_path, bucket_path, os.path.join(upload_prefix, table_name, filename)) sql = f"""CREATE TABLE {full_table_name} ( {schema_definition} ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE LOCATION '{location}' tblproperties ('skip.header.line.count'='1')""" logging.info(form.con.data) engine = create_engine(form.con.data.sqlalchemy_uri_decrypted) engine.execute(sql) @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "CAST('{}' AS DATE)".format(dttm.isoformat()[:10]) elif tt == 'TIMESTAMP': return "CAST('{}' AS TIMESTAMP)".format( dttm.strftime('%Y-%m-%d %H:%M:%S')) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod def adjust_database_uri(cls, uri, selected_schema=None): if selected_schema: uri.database = selected_schema return uri @classmethod def extract_error_message(cls, e): msg = str(e) match = re.search(r'errorMessage="(.*?)(?<!\\)"', msg) if match: msg = match.group(1) return msg @classmethod def progress(cls, log_lines): total_jobs = 1 # assuming there's at least 1 job current_job = 1 stages = {} for line in log_lines: match = cls.jobs_stats_r.match(line) if match: total_jobs = int(match.groupdict()['max_jobs']) or 1 match = cls.launching_job_r.match(line) if match: current_job = int(match.groupdict()['job_number']) total_jobs = int(match.groupdict()['max_jobs']) or 1 stages = {} match = cls.stage_progress_r.match(line) if match: stage_number = int(match.groupdict()['stage_number']) map_progress = int(match.groupdict()['map_progress']) reduce_progress = int(match.groupdict()['reduce_progress']) stages[stage_number] = (map_progress + reduce_progress) / 2 logging.info( 'Progress detail: {}, ' 'current job {}, ' 'total jobs: {}'.format(stages, current_job, total_jobs)) stage_progress = sum( stages.values()) / len(stages.values()) if stages else 0 progress = ( 100 * (current_job - 1) / total_jobs + stage_progress / total_jobs ) return int(progress) @classmethod def get_tracking_url(cls, log_lines): lkp = 'Tracking URL = ' for line in log_lines: if lkp in line: return line.split(lkp)[1] @classmethod def handle_cursor(cls, cursor, query, session): """Updates progress information""" from pyhive import hive # pylint: disable=no-name-in-module unfinished_states = ( hive.ttypes.TOperationState.INITIALIZED_STATE, hive.ttypes.TOperationState.RUNNING_STATE, ) polled = cursor.poll() last_log_line = 0 tracking_url = None job_id = None while polled.operationState in unfinished_states: query = session.query(type(query)).filter_by(id=query.id).one() if query.status == QueryStatus.STOPPED: cursor.cancel() break log = cursor.fetch_logs() or '' if log: log_lines = log.splitlines() progress = 
cls.progress(log_lines) logging.info('Progress total: {}'.format(progress)) needs_commit = False if progress > query.progress: query.progress = progress needs_commit = True if not tracking_url: tracking_url = cls.get_tracking_url(log_lines) if tracking_url: job_id = tracking_url.split('/')[-2] logging.info( 'Found the tracking url: {}'.format(tracking_url)) tracking_url = tracking_url_trans(tracking_url) logging.info( 'Transformation applied: {}'.format(tracking_url)) query.tracking_url = tracking_url logging.info('Job id: {}'.format(job_id)) needs_commit = True if job_id and len(log_lines) > last_log_line: # Wait for job id before logging things out # this allows for prefixing all log lines and becoming # searchable in something like Kibana for l in log_lines[last_log_line:]: logging.info('[{}] {}'.format(job_id, l)) last_log_line = len(log_lines) if needs_commit: session.commit() time.sleep(hive_poll_interval) polled = cursor.poll() @classmethod def where_latest_partition( cls, table_name, schema, database, qry, columns=None): try: col_name, value = cls.latest_partition( table_name, schema, database, show_first=True) except Exception: # table is not partitioned return False for c in columns: if c.get('name') == col_name: return qry.where(Column(col_name) == value) return False @classmethod def latest_sub_partition(cls, table_name, schema, database, **kwargs): # TODO(bogdan): implement` pass @classmethod def _latest_partition_from_df(cls, df): """Hive partitions look like ds={partition name}""" return df.ix[:, 0].max().split('=')[1] @classmethod def _partition_query( cls, table_name, limit=0, order_by=None, filters=None): return f'SHOW PARTITIONS {table_name}' @classmethod def modify_url_for_impersonation(cls, url, impersonate_user, username): """ Modify the SQL Alchemy URL object with the user to impersonate if applicable. :param url: SQLAlchemy URL object :param impersonate_user: Bool indicating if impersonation is enabled :param username: Effective username """ # Do nothing in the URL object since instead this should modify # the configuraiton dictionary. 
See get_configuration_for_impersonation pass @classmethod def get_configuration_for_impersonation(cls, uri, impersonate_user, username): """ Return a configuration dictionary that can be merged with other configs that can set the correct properties for impersonating users :param uri: URI string :param impersonate_user: Bool indicating if impersonation is enabled :param username: Effective username :return: Dictionary with configs required for impersonation """ configuration = {} url = make_url(uri) backend_name = url.get_backend_name() # Must be Hive connection, enable impersonation, and set param auth=LDAP|KERBEROS if (backend_name == 'hive' and 'auth' in url.query.keys() and impersonate_user is True and username is not None): configuration['hive.server2.proxy.user'] = username return configuration @staticmethod def execute(cursor, query, async_=False): kwargs = {'async': async_} cursor.execute(query, **kwargs) class MssqlEngineSpec(BaseEngineSpec): engine = 'mssql' epoch_to_dttm = "dateadd(S, {col}, '1970-01-01')" limit_method = LimitMethod.WRAP_SQL time_grain_functions = { None: '{col}', 'PT1S': "DATEADD(second, DATEDIFF(second, '2000-01-01', {col}), '2000-01-01')", 'PT1M': 'DATEADD(minute, DATEDIFF(minute, 0, {col}), 0)', 'PT5M': 'DATEADD(minute, DATEDIFF(minute, 0, {col}) / 5 * 5, 0)', 'PT10M': 'DATEADD(minute, DATEDIFF(minute, 0, {col}) / 10 * 10, 0)', 'PT15M': 'DATEADD(minute, DATEDIFF(minute, 0, {col}) / 15 * 15, 0)', 'PT0.5H': 'DATEADD(minute, DATEDIFF(minute, 0, {col}) / 30 * 30, 0)', 'PT1H': 'DATEADD(hour, DATEDIFF(hour, 0, {col}), 0)', 'P1D': 'DATEADD(day, DATEDIFF(day, 0, {col}), 0)', 'P1W': 'DATEADD(week, DATEDIFF(week, 0, {col}), 0)', 'P1M': 'DATEADD(month, DATEDIFF(month, 0, {col}), 0)', 'P0.25Y': 'DATEADD(quarter, DATEDIFF(quarter, 0, {col}), 0)', 'P1Y': 'DATEADD(year, DATEDIFF(year, 0, {col}), 0)', } @classmethod def convert_dttm(cls, target_type, dttm): return "CONVERT(DATETIME, '{}', 126)".format(dttm.isoformat()) @classmethod def fetch_data(cls, cursor, limit): data = super(MssqlEngineSpec, cls).fetch_data(cursor, limit) if len(data) != 0 and type(data[0]).__name__ == 'Row': data = [[elem for elem in r] for r in data] return data class AthenaEngineSpec(BaseEngineSpec): engine = 'awsathena' time_grain_functions = { None: '{col}', 'PT1S': "date_trunc('second', CAST({col} AS TIMESTAMP))", 'PT1M': "date_trunc('minute', CAST({col} AS TIMESTAMP))", 'PT1H': "date_trunc('hour', CAST({col} AS TIMESTAMP))", 'P1D': "date_trunc('day', CAST({col} AS TIMESTAMP))", 'P1W': "date_trunc('week', CAST({col} AS TIMESTAMP))", 'P1M': "date_trunc('month', CAST({col} AS TIMESTAMP))", 'P0.25Y': "date_trunc('quarter', CAST({col} AS TIMESTAMP))", 'P1Y': "date_trunc('year', CAST({col} AS TIMESTAMP))", 'P1W/1970-01-03T00:00:00Z': "date_add('day', 5, date_trunc('week', \ date_add('day', 1, CAST({col} AS TIMESTAMP))))", '1969-12-28T00:00:00Z/P1W': "date_add('day', -1, date_trunc('week', \ date_add('day', 1, CAST({col} AS TIMESTAMP))))", } @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "from_iso8601_date('{}')".format(dttm.isoformat()[:10]) if tt == 'TIMESTAMP': return "from_iso8601_timestamp('{}')".format(dttm.isoformat()) return ("CAST ('{}' AS TIMESTAMP)" .format(dttm.strftime('%Y-%m-%d %H:%M:%S'))) @classmethod def epoch_to_dttm(cls): return 'from_unixtime({col})' class ClickHouseEngineSpec(BaseEngineSpec): """Dialect for ClickHouse analytical DB.""" engine = 'clickhouse' time_secondary_columns = True time_groupby_inline = True 
time_grain_functions = { None: '{col}', 'PT1M': 'toStartOfMinute(toDateTime({col}))', 'PT5M': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 300)*300)', 'PT10M': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 600)*600)', 'PT15M': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 900)*900)', 'PT0.5H': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 1800)*1800)', 'PT1H': 'toStartOfHour(toDateTime({col}))', 'P1D': 'toStartOfDay(toDateTime({col}))', 'P1W': 'toMonday(toDateTime({col}))', 'P1M': 'toStartOfMonth(toDateTime({col}))', 'P0.25Y': 'toStartOfQuarter(toDateTime({col}))', 'P1Y': 'toStartOfYear(toDateTime({col}))', } @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "toDate('{}')".format(dttm.strftime('%Y-%m-%d')) if tt == 'DATETIME': return "toDateTime('{}')".format( dttm.strftime('%Y-%m-%d %H:%M:%S')) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) class BQEngineSpec(BaseEngineSpec): """Engine spec for Google's BigQuery As contributed by @mxmzdlv on issue #945""" engine = 'bigquery' """ https://www.python.org/dev/peps/pep-0249/#arraysize raw_connections bypass the pybigquery query execution context and deal with raw dbapi connection directly. If this value is not set, the default value is set to 1, as described here, https://googlecloudplatform.github.io/google-cloud-python/latest/_modules/google/cloud/bigquery/dbapi/cursor.html#Cursor The default value of 5000 is derived from the pybigquery. https://github.com/mxmzdlv/pybigquery/blob/d214bb089ca0807ca9aaa6ce4d5a01172d40264e/pybigquery/sqlalchemy_bigquery.py#L102 """ arraysize = 5000 time_grain_functions = { None: '{col}', 'PT1S': 'TIMESTAMP_TRUNC({col}, SECOND)', 'PT1M': 'TIMESTAMP_TRUNC({col}, MINUTE)', 'PT1H': 'TIMESTAMP_TRUNC({col}, HOUR)', 'P1D': 'TIMESTAMP_TRUNC({col}, DAY)', 'P1W': 'TIMESTAMP_TRUNC({col}, WEEK)', 'P1M': 'TIMESTAMP_TRUNC({col}, MONTH)', 'P0.25Y': 'TIMESTAMP_TRUNC({col}, QUARTER)', 'P1Y': 'TIMESTAMP_TRUNC({col}, YEAR)', } @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "'{}'".format(dttm.strftime('%Y-%m-%d')) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod def fetch_data(cls, cursor, limit): data = super(BQEngineSpec, cls).fetch_data(cursor, limit) if len(data) != 0 and type(data[0]).__name__ == 'Row': data = [r.values() for r in data] return data @staticmethod def mutate_label(label): """ BigQuery field_name should start with a letter or underscore, contain only alphanumeric characters and be at most 128 characters long. Labels that start with a number are prefixed with an underscore. Any unsupported characters are replaced with underscores and an md5 hash is added to the end of the label to avoid possible collisions. If the resulting label exceeds 128 characters, only the md5 sum is returned. 
:param str label: the original label which might include unsupported characters :return: String that is supported by the database """ hashed_label = '_' + hashlib.md5(label.encode('utf-8')).hexdigest() # if label starts with number, add underscore as first character mutated_label = '_' + label if re.match(r'^\d', label) else label # replace non-alphanumeric characters with underscores mutated_label = re.sub(r'[^\w]+', '_', mutated_label) if mutated_label != label: # add md5 hash to label to avoid possible collisions mutated_label += hashed_label # return only hash if length of final label exceeds 128 chars return mutated_label if len(mutated_label) <= 128 else hashed_label @classmethod def extra_table_metadata(cls, database, table_name, schema_name): indexes = database.get_indexes(table_name, schema_name) if not indexes: return {} partitions_columns = [ index.get('column_names', []) for index in indexes if index.get('name') == 'partition' ] cluster_columns = [ index.get('column_names', []) for index in indexes if index.get('name') == 'clustering' ] return { 'partitions': { 'cols': partitions_columns, }, 'clustering': { 'cols': cluster_columns, }, } @classmethod def _get_fields(cls, cols): """ BigQuery dialect requires us to not use backtick in the fieldname which are nested. Using literal_column handles that issue. http://docs.sqlalchemy.org/en/latest/core/tutorial.html#using-more-specific-text-with-table-literal-column-and-column Also explicility specifying column names so we don't encounter duplicate column names in the result. """ return [sqla.literal_column(c.get('name')).label(c.get('name').replace('.', '__')) for c in cols] class ImpalaEngineSpec(BaseEngineSpec): """Engine spec for Cloudera's Impala""" engine = 'impala' time_grain_functions = { None: '{col}', 'PT1M': "TRUNC({col}, 'MI')", 'PT1H': "TRUNC({col}, 'HH')", 'P1D': "TRUNC({col}, 'DD')", 'P1W': "TRUNC({col}, 'WW')", 'P1M': "TRUNC({col}, 'MONTH')", 'P0.25Y': "TRUNC({col}, 'Q')", 'P1Y': "TRUNC({col}, 'YYYY')", } @classmethod def epoch_to_dttm(cls): return 'from_unixtime({col})' @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "'{}'".format(dttm.strftime('%Y-%m-%d')) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod def get_schema_names(cls, inspector): schemas = [row[0] for row in inspector.engine.execute('SHOW SCHEMAS') if not row[0].startswith('_')] return schemas class DruidEngineSpec(BaseEngineSpec): """Engine spec for Druid.io""" engine = 'druid' inner_joins = False allows_subquery = False time_grain_functions = { None: '{col}', 'PT1S': 'FLOOR({col} TO SECOND)', 'PT1M': 'FLOOR({col} TO MINUTE)', 'PT1H': 'FLOOR({col} TO HOUR)', 'P1D': 'FLOOR({col} TO DAY)', 'P1W': 'FLOOR({col} TO WEEK)', 'P1M': 'FLOOR({col} TO MONTH)', 'P0.25Y': 'FLOOR({col} TO QUARTER)', 'P1Y': 'FLOOR({col} TO YEAR)', } class GSheetsEngineSpec(SqliteEngineSpec): """Engine for Google spreadsheets""" engine = 'gsheets' inner_joins = False allows_subquery = False class KylinEngineSpec(BaseEngineSpec): """Dialect for Apache Kylin""" engine = 'kylin' time_grain_functions = { None: '{col}', 'PT1S': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO SECOND) AS TIMESTAMP)', 'PT1M': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MINUTE) AS TIMESTAMP)', 'PT1H': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO HOUR) AS TIMESTAMP)', 'P1D': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO DAY) AS DATE)', 'P1W': 'CAST(TIMESTAMPADD(WEEK, WEEK(CAST({col} AS DATE)) - 1, \ FLOOR(CAST({col} AS TIMESTAMP) TO YEAR)) AS DATE)', 'P1M': 
'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MONTH) AS DATE)', 'P0.25Y': 'CAST(TIMESTAMPADD(QUARTER, QUARTER(CAST({col} AS DATE)) - 1, \ FLOOR(CAST({col} AS TIMESTAMP) TO YEAR)) AS DATE)', 'P1Y': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO YEAR) AS DATE)', } @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "CAST('{}' AS DATE)".format(dttm.isoformat()[:10]) if tt == 'TIMESTAMP': return "CAST('{}' AS TIMESTAMP)".format( dttm.strftime('%Y-%m-%d %H:%M:%S')) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) class TeradataEngineSpec(BaseEngineSpec): """Dialect for Teradata DB.""" engine = 'teradata' limit_method = LimitMethod.WRAP_SQL time_grain_functions = { None: '{col}', 'PT1M': "TRUNC(CAST({col} as DATE), 'MI')", 'PT1H': "TRUNC(CAST({col} as DATE), 'HH')", 'P1D': "TRUNC(CAST({col} as DATE), 'DDD')", 'P1W': "TRUNC(CAST({col} as DATE), 'WW')", 'P1M': "TRUNC(CAST({col} as DATE), 'MONTH')", 'P0.25Y': "TRUNC(CAST({col} as DATE), 'Q')", 'P1Y': "TRUNC(CAST({col} as DATE), 'YEAR')", } engines = { o.engine: o for o in globals().values() if inspect.isclass(o) and issubclass(o, BaseEngineSpec)}
37.829091
126
0.594156
from collections import namedtuple import hashlib import inspect import logging import os import re import textwrap import time from flask import g from flask_babel import lazy_gettext as _ import pandas import sqlalchemy as sqla from sqlalchemy import Column, select from sqlalchemy.engine import create_engine from sqlalchemy.engine.url import make_url from sqlalchemy.sql import quoted_name, text from sqlalchemy.sql.expression import TextAsFrom import sqlparse from werkzeug.utils import secure_filename from superset import app, conf, db, sql_parse from superset.exceptions import SupersetTemplateException from superset.utils import core as utils QueryStatus = utils.QueryStatus config = app.config tracking_url_trans = conf.get('TRACKING_URL_TRANSFORMER') hive_poll_interval = conf.get('HIVE_POLL_INTERVAL') Grain = namedtuple('Grain', 'name label function duration') builtin_time_grains = { None: 'Time Column', 'PT1S': 'second', 'PT1M': 'minute', 'PT5M': '5 minute', 'PT10M': '10 minute', 'PT15M': '15 minute', 'PT0.5H': 'half hour', 'PT1H': 'hour', 'P1D': 'day', 'P1W': 'week', 'P1M': 'month', 'P0.25Y': 'quarter', 'P1Y': 'year', '1969-12-28T00:00:00Z/P1W': 'week_start_sunday', '1969-12-29T00:00:00Z/P1W': 'week_start_monday', 'P1W/1970-01-03T00:00:00Z': 'week_ending_saturday', 'P1W/1970-01-04T00:00:00Z': 'week_ending_sunday', } def _create_time_grains_tuple(time_grains, time_grain_functions, blacklist): ret_list = [] blacklist = blacklist if blacklist else [] for duration, func in time_grain_functions.items(): if duration not in blacklist: name = time_grains.get(duration) ret_list.append(Grain(name, _(name), func, duration)) return tuple(ret_list) class LimitMethod(object): FETCH_MANY = 'fetch_many' WRAP_SQL = 'wrap_sql' FORCE_LIMIT = 'force_limit' class BaseEngineSpec(object): engine = 'base' time_grain_functions = {} time_groupby_inline = False limit_method = LimitMethod.FORCE_LIMIT time_secondary_columns = False inner_joins = True allows_subquery = True force_column_alias_quotes = False arraysize = None @classmethod def get_time_grains(cls): blacklist = config.get('TIME_GRAIN_BLACKLIST', []) grains = builtin_time_grains.copy() grains.update(config.get('TIME_GRAIN_ADDONS', {})) grain_functions = cls.time_grain_functions.copy() grain_addon_functions = config.get('TIME_GRAIN_ADDON_FUNCTIONS', {}) grain_functions.update(grain_addon_functions.get(cls.engine, {})) return _create_time_grains_tuple(grains, grain_functions, blacklist) @classmethod def fetch_data(cls, cursor, limit): if cls.arraysize: cursor.arraysize = cls.arraysize if cls.limit_method == LimitMethod.FETCH_MANY: return cursor.fetchmany(limit) return cursor.fetchall() @classmethod def epoch_to_dttm(cls): raise NotImplementedError() @classmethod def epoch_ms_to_dttm(cls): return cls.epoch_to_dttm().replace('{col}', '({col}/1000.000)') @classmethod def get_datatype(cls, type_code): if isinstance(type_code, str) and len(type_code): return type_code.upper() @classmethod def extra_table_metadata(cls, database, table_name, schema_name): return {} @classmethod def apply_limit_to_sql(cls, sql, limit, database): if cls.limit_method == LimitMethod.WRAP_SQL: sql = sql.strip('\t\n ;') qry = ( select('*') .select_from( TextAsFrom(text(sql), ['*']).alias('inner_qry'), ) .limit(limit) ) return database.compile_sqla_query(qry) elif LimitMethod.FORCE_LIMIT: parsed_query = sql_parse.ParsedQuery(sql) sql = parsed_query.get_query_with_new_limit(limit) return sql @classmethod def get_limit_from_sql(cls, sql): parsed_query = sql_parse.ParsedQuery(sql) return 
parsed_query.limit @classmethod def get_query_with_new_limit(cls, sql, limit): parsed_query = sql_parse.ParsedQuery(sql) return parsed_query.get_query_with_new_limit(limit) @staticmethod def csv_to_df(**kwargs): kwargs['filepath_or_buffer'] = \ config['UPLOAD_FOLDER'] + kwargs['filepath_or_buffer'] kwargs['encoding'] = 'utf-8' kwargs['iterator'] = True chunks = pandas.read_csv(**kwargs) df = pandas.DataFrame() df = pandas.concat(chunk for chunk in chunks) return df @staticmethod def df_to_db(df, table, **kwargs): df.to_sql(**kwargs) table.user_id = g.user.id table.schema = kwargs['schema'] table.fetch_metadata() db.session.add(table) db.session.commit() @staticmethod def create_table_from_csv(form, table): def _allowed_file(filename): extension = os.path.splitext(filename)[1] return extension and extension[1:] in config['ALLOWED_EXTENSIONS'] filename = secure_filename(form.csv_file.data.filename) if not _allowed_file(filename): raise Exception('Invalid file type selected') kwargs = { 'filepath_or_buffer': filename, 'sep': form.sep.data, 'header': form.header.data if form.header.data else 0, 'index_col': form.index_col.data, 'mangle_dupe_cols': form.mangle_dupe_cols.data, 'skipinitialspace': form.skipinitialspace.data, 'skiprows': form.skiprows.data, 'nrows': form.nrows.data, 'skip_blank_lines': form.skip_blank_lines.data, 'parse_dates': form.parse_dates.data, 'infer_datetime_format': form.infer_datetime_format.data, 'chunksize': 10000, } df = BaseEngineSpec.csv_to_df(**kwargs) df_to_db_kwargs = { 'table': table, 'df': df, 'name': form.name.data, 'con': create_engine(form.con.data.sqlalchemy_uri_decrypted, echo=False), 'schema': form.schema.data, 'if_exists': form.if_exists.data, 'index': form.index.data, 'index_label': form.index_label.data, 'chunksize': 10000, } BaseEngineSpec.df_to_db(**df_to_db_kwargs) @classmethod def convert_dttm(cls, target_type, dttm): return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod def fetch_result_sets(cls, db, datasource_type): schemas = db.all_schema_names(cache=db.schema_cache_enabled, cache_timeout=db.schema_cache_timeout, force=True) all_result_sets = [] for schema in schemas: if datasource_type == 'table': all_datasource_names = db.all_table_names_in_schema( schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout) elif datasource_type == 'view': all_datasource_names = db.all_view_names_in_schema( schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout) all_result_sets += [ '{}.{}'.format(schema, t) for t in all_datasource_names] return all_result_sets @classmethod def handle_cursor(cls, cursor, query, session): pass @classmethod def extract_error_message(cls, e): return utils.error_msg_from_exception(e) @classmethod def adjust_database_uri(cls, uri, selected_schema): return uri @classmethod def patch(cls): pass @classmethod def get_schema_names(cls, inspector): return sorted(inspector.get_schema_names()) @classmethod def get_table_names(cls, inspector, schema): return sorted(inspector.get_table_names(schema)) @classmethod def get_view_names(cls, inspector, schema): return sorted(inspector.get_view_names(schema)) @classmethod def where_latest_partition( cls, table_name, schema, database, qry, columns=None): return False @classmethod def _get_fields(cls, cols): return [sqla.column(c.get('name')) for c in cols] @classmethod def select_star(cls, my_db, table_name, engine, schema=None, limit=100, show_cols=False, indent=True, latest_partition=True, cols=None): fields 
= '*' cols = cols or [] if (show_cols or latest_partition) and not cols: cols = my_db.get_columns(table_name, schema) if show_cols: fields = cls._get_fields(cols) quote = engine.dialect.identifier_preparer.quote if schema: full_table_name = quote(schema) + '.' + quote(table_name) else: full_table_name = quote(table_name) qry = select(fields).select_from(text(full_table_name)) if limit: qry = qry.limit(limit) if latest_partition: partition_query = cls.where_latest_partition( table_name, schema, my_db, qry, columns=cols) if partition_query != False: qry = partition_query sql = my_db.compile_sqla_query(qry) if indent: sql = sqlparse.format(sql, reindent=True) return sql @classmethod def modify_url_for_impersonation(cls, url, impersonate_user, username): if impersonate_user is not None and username is not None: url.username = username @classmethod def get_configuration_for_impersonation(cls, uri, impersonate_user, username): return {} @classmethod def execute(cls, cursor, query, **kwargs): if cls.arraysize: cursor.arraysize = cls.arraysize cursor.execute(query) @classmethod def make_label_compatible(cls, label): label = cls.mutate_label(label) return quoted_name(label, True) if cls.force_column_alias_quotes else label @staticmethod def mutate_label(label): return label class PostgresBaseEngineSpec(BaseEngineSpec): engine = '' time_grain_functions = { None: '{col}', 'PT1S': "DATE_TRUNC('second', {col}) AT TIME ZONE 'UTC'", 'PT1M': "DATE_TRUNC('minute', {col}) AT TIME ZONE 'UTC'", 'PT1H': "DATE_TRUNC('hour', {col}) AT TIME ZONE 'UTC'", 'P1D': "DATE_TRUNC('day', {col}) AT TIME ZONE 'UTC'", 'P1W': "DATE_TRUNC('week', {col}) AT TIME ZONE 'UTC'", 'P1M': "DATE_TRUNC('month', {col}) AT TIME ZONE 'UTC'", 'P0.25Y': "DATE_TRUNC('quarter', {col}) AT TIME ZONE 'UTC'", 'P1Y': "DATE_TRUNC('year', {col}) AT TIME ZONE 'UTC'", } @classmethod def fetch_data(cls, cursor, limit): if not cursor.description: return [] if cls.limit_method == LimitMethod.FETCH_MANY: return cursor.fetchmany(limit) return cursor.fetchall() @classmethod def epoch_to_dttm(cls): return "(timestamp 'epoch' + {col} * interval '1 second')" @classmethod def convert_dttm(cls, target_type, dttm): return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) class PostgresEngineSpec(PostgresBaseEngineSpec): engine = 'postgresql' @classmethod def get_table_names(cls, inspector, schema): tables = inspector.get_table_names(schema) tables.extend(inspector.get_foreign_table_names(schema)) return sorted(tables) class SnowflakeEngineSpec(PostgresBaseEngineSpec): engine = 'snowflake' force_column_alias_quotes = True time_grain_functions = { None: '{col}', 'PT1S': "DATE_TRUNC('SECOND', {col})", 'PT1M': "DATE_TRUNC('MINUTE', {col})", 'PT5M': "DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 5) * 5, \ DATE_TRUNC('HOUR', {col}))", 'PT10M': "DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 10) * 10, \ DATE_TRUNC('HOUR', {col}))", 'PT15M': "DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 15) * 15, \ DATE_TRUNC('HOUR', {col}))", 'PT0.5H': "DATEADD(MINUTE, FLOOR(DATE_PART(MINUTE, {col}) / 30) * 30, \ DATE_TRUNC('HOUR', {col}))", 'PT1H': "DATE_TRUNC('HOUR', {col})", 'P1D': "DATE_TRUNC('DAY', {col})", 'P1W': "DATE_TRUNC('WEEK', {col})", 'P1M': "DATE_TRUNC('MONTH', {col})", 'P0.25Y': "DATE_TRUNC('QUARTER', {col})", 'P1Y': "DATE_TRUNC('YEAR', {col})", } @classmethod def adjust_database_uri(cls, uri, selected_schema=None): database = uri.database if '/' in uri.database: database = uri.database.split('/')[0] if selected_schema: uri.database = database + '/' + 
selected_schema return uri class VerticaEngineSpec(PostgresBaseEngineSpec): engine = 'vertica' class RedshiftEngineSpec(PostgresBaseEngineSpec): engine = 'redshift' @staticmethod def mutate_label(label): return label.lower() class OracleEngineSpec(PostgresBaseEngineSpec): engine = 'oracle' limit_method = LimitMethod.WRAP_SQL force_column_alias_quotes = True time_grain_functions = { None: '{col}', 'PT1S': 'CAST({col} as DATE)', 'PT1M': "TRUNC(CAST({col} as DATE), 'MI')", 'PT1H': "TRUNC(CAST({col} as DATE), 'HH')", 'P1D': "TRUNC(CAST({col} as DATE), 'DDD')", 'P1W': "TRUNC(CAST({col} as DATE), 'WW')", 'P1M': "TRUNC(CAST({col} as DATE), 'MONTH')", 'P0.25Y': "TRUNC(CAST({col} as DATE), 'Q')", 'P1Y': "TRUNC(CAST({col} as DATE), 'YEAR')", } @classmethod def convert_dttm(cls, target_type, dttm): return ( """TO_TIMESTAMP('{}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')""" ).format(dttm.isoformat()) @staticmethod def mutate_label(label): if len(label) > 30: hashed_label = hashlib.md5(label.encode('utf-8')).hexdigest() return hashed_label[:30] return label class Db2EngineSpec(BaseEngineSpec): engine = 'ibm_db_sa' limit_method = LimitMethod.WRAP_SQL force_column_alias_quotes = True time_grain_functions = { None: '{col}', 'PT1S': 'CAST({col} as TIMESTAMP)' ' - MICROSECOND({col}) MICROSECONDS', 'PT1M': 'CAST({col} as TIMESTAMP)' ' - SECOND({col}) SECONDS' ' - MICROSECOND({col}) MICROSECONDS', 'PT1H': 'CAST({col} as TIMESTAMP)' ' - MINUTE({col}) MINUTES' ' - SECOND({col}) SECONDS' ' - MICROSECOND({col}) MICROSECONDS ', 'P1D': 'CAST({col} as TIMESTAMP)' ' - HOUR({col}) HOURS' ' - MINUTE({col}) MINUTES' ' - SECOND({col}) SECONDS' ' - MICROSECOND({col}) MICROSECONDS', 'P1W': '{col} - (DAYOFWEEK({col})) DAYS', 'P1M': '{col} - (DAY({col})-1) DAYS', 'P0.25Y': '{col} - (DAY({col})-1) DAYS' ' - (MONTH({col})-1) MONTHS' ' + ((QUARTER({col})-1) * 3) MONTHS', 'P1Y': '{col} - (DAY({col})-1) DAYS' ' - (MONTH({col})-1) MONTHS', } @classmethod def epoch_to_dttm(cls): return "(TIMESTAMP('1970-01-01', '00:00:00') + {col} SECONDS)" @classmethod def convert_dttm(cls, target_type, dttm): return "'{}'".format(dttm.strftime('%Y-%m-%d-%H.%M.%S')) @staticmethod def mutate_label(label): if len(label) > 30: hashed_label = hashlib.md5(label.encode('utf-8')).hexdigest() return hashed_label[:30] return label class SqliteEngineSpec(BaseEngineSpec): engine = 'sqlite' time_grain_functions = { None: '{col}', 'PT1H': "DATETIME(STRFTIME('%Y-%m-%dT%H:00:00', {col}))", 'P1D': 'DATE({col})', 'P1W': "DATE({col}, -strftime('%W', {col}) || ' days')", 'P1M': "DATE({col}, -strftime('%d', {col}) || ' days', '+1 day')", 'P1Y': "DATETIME(STRFTIME('%Y-01-01T00:00:00', {col}))", 'P1W/1970-01-03T00:00:00Z': "DATE({col}, 'weekday 6')", '1969-12-28T00:00:00Z/P1W': "DATE({col}, 'weekday 0', '-7 days')", } @classmethod def epoch_to_dttm(cls): return "datetime({col}, 'unixepoch')" @classmethod def fetch_result_sets(cls, db, datasource_type): schemas = db.all_schema_names(cache=db.schema_cache_enabled, cache_timeout=db.schema_cache_timeout, force=True) all_result_sets = [] schema = schemas[0] if datasource_type == 'table': all_datasource_names = db.all_table_names_in_schema( schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout) elif datasource_type == 'view': all_datasource_names = db.all_view_names_in_schema( schema=schema, force=True, cache=db.table_cache_enabled, cache_timeout=db.table_cache_timeout) all_result_sets += [ '{}.{}'.format(schema, t) for t in all_datasource_names] return all_result_sets @classmethod def 
convert_dttm(cls, target_type, dttm): iso = dttm.isoformat().replace('T', ' ') if '.' not in iso: iso += '.000000' return "'{}'".format(iso) @classmethod def get_table_names(cls, inspector, schema): return sorted(inspector.get_table_names()) class MySQLEngineSpec(BaseEngineSpec): engine = 'mysql' time_grain_functions = { None: '{col}', 'PT1S': 'DATE_ADD(DATE({col}), ' 'INTERVAL (HOUR({col})*60*60 + MINUTE({col})*60' ' + SECOND({col})) SECOND)', 'PT1M': 'DATE_ADD(DATE({col}), ' 'INTERVAL (HOUR({col})*60 + MINUTE({col})) MINUTE)', 'PT1H': 'DATE_ADD(DATE({col}), ' 'INTERVAL HOUR({col}) HOUR)', 'P1D': 'DATE({col})', 'P1W': 'DATE(DATE_SUB({col}, ' 'INTERVAL DAYOFWEEK({col}) - 1 DAY))', 'P1M': 'DATE(DATE_SUB({col}, ' 'INTERVAL DAYOFMONTH({col}) - 1 DAY))', 'P0.25Y': 'MAKEDATE(YEAR({col}), 1) ' '+ INTERVAL QUARTER({col}) QUARTER - INTERVAL 1 QUARTER', 'P1Y': 'DATE(DATE_SUB({col}, ' 'INTERVAL DAYOFYEAR({col}) - 1 DAY))', '1969-12-29T00:00:00Z/P1W': 'DATE(DATE_SUB({col}, ' 'INTERVAL DAYOFWEEK(DATE_SUB({col}, INTERVAL 1 DAY)) - 1 DAY))', } type_code_map = {} @classmethod def convert_dttm(cls, target_type, dttm): if target_type.upper() in ('DATETIME', 'DATE'): return "STR_TO_DATE('{}', '%Y-%m-%d %H:%i:%s')".format( dttm.strftime('%Y-%m-%d %H:%M:%S')) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod def adjust_database_uri(cls, uri, selected_schema=None): if selected_schema: uri.database = selected_schema return uri @classmethod def get_datatype(cls, type_code): if not cls.type_code_map: import MySQLdb ft = MySQLdb.constants.FIELD_TYPE cls.type_code_map = { getattr(ft, k): k for k in dir(ft) if not k.startswith('_') } datatype = type_code if isinstance(type_code, int): datatype = cls.type_code_map.get(type_code) if datatype and isinstance(datatype, str) and len(datatype): return datatype @classmethod def epoch_to_dttm(cls): return 'from_unixtime({col})' @classmethod def extract_error_message(cls, e): message = str(e) try: if isinstance(e.args, tuple) and len(e.args) > 1: message = e.args[1] except Exception: pass return message class PrestoEngineSpec(BaseEngineSpec): engine = 'presto' time_grain_functions = { None: '{col}', 'PT1S': "date_trunc('second', CAST({col} AS TIMESTAMP))", 'PT1M': "date_trunc('minute', CAST({col} AS TIMESTAMP))", 'PT1H': "date_trunc('hour', CAST({col} AS TIMESTAMP))", 'P1D': "date_trunc('day', CAST({col} AS TIMESTAMP))", 'P1W': "date_trunc('week', CAST({col} AS TIMESTAMP))", 'P1M': "date_trunc('month', CAST({col} AS TIMESTAMP))", 'P0.25Y': "date_trunc('quarter', CAST({col} AS TIMESTAMP))", 'P1Y': "date_trunc('year', CAST({col} AS TIMESTAMP))", 'P1W/1970-01-03T00:00:00Z': "date_add('day', 5, date_trunc('week', date_add('day', 1, \ CAST({col} AS TIMESTAMP))))", '1969-12-28T00:00:00Z/P1W': "date_add('day', -1, date_trunc('week', \ date_add('day', 1, CAST({col} AS TIMESTAMP))))", } @classmethod def get_view_names(cls, inspector, schema): return [] @classmethod def adjust_database_uri(cls, uri, selected_schema=None): database = uri.database if selected_schema and database: if '/' in database: database = database.split('/')[0] + '/' + selected_schema else: database += '/' + selected_schema uri.database = database return uri @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "from_iso8601_date('{}')".format(dttm.isoformat()[:10]) if tt == 'TIMESTAMP': return "from_iso8601_timestamp('{}')".format(dttm.isoformat()) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod def epoch_to_dttm(cls): 
return 'from_unixtime({col})' @classmethod def fetch_result_sets(cls, db, datasource_type): result_set_df = db.get_df( """SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S ORDER BY concat(table_schema, '.', table_name)""".format( datasource_type.upper(), ), None) result_sets = [] for unused, row in result_set_df.iterrows(): result_sets.append('{}.{}'.format( row['table_schema'], row['table_name'])) return result_sets @classmethod def extra_table_metadata(cls, database, table_name, schema_name): indexes = database.get_indexes(table_name, schema_name) if not indexes: return {} cols = indexes[0].get('column_names', []) full_table_name = table_name if schema_name and '.' not in table_name: full_table_name = '{}.{}'.format(schema_name, table_name) pql = cls._partition_query(full_table_name) col_name, latest_part = cls.latest_partition( table_name, schema_name, database, show_first=True) return { 'partitions': { 'cols': cols, 'latest': {col_name: latest_part}, 'partitionQuery': pql, }, } @classmethod def handle_cursor(cls, cursor, query, session): logging.info('Polling the cursor for progress') polled = cursor.poll() while polled: stats = polled.get('stats', {}) query = session.query(type(query)).filter_by(id=query.id).one() if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]: cursor.cancel() break if stats: state = stats.get('state') if state == 'FINISHED': break completed_splits = float(stats.get('completedSplits')) total_splits = float(stats.get('totalSplits')) if total_splits and completed_splits: progress = 100 * (completed_splits / total_splits) logging.info( 'Query progress: {} / {} ' 'splits'.format(completed_splits, total_splits)) if progress > query.progress: query.progress = progress session.commit() time.sleep(1) logging.info('Polling the cursor for progress') polled = cursor.poll() @classmethod def extract_error_message(cls, e): if ( hasattr(e, 'orig') and type(e.orig).__name__ == 'DatabaseError' and isinstance(e.orig[0], dict)): error_dict = e.orig[0] return '{} at {}: {}'.format( error_dict.get('errorName'), error_dict.get('errorLocation'), error_dict.get('message'), ) if ( type(e).__name__ == 'DatabaseError' and hasattr(e, 'args') and len(e.args) > 0 ): error_dict = e.args[0] return error_dict.get('message') return utils.error_msg_from_exception(e) @classmethod def _partition_query( cls, table_name, limit=0, order_by=None, filters=None): limit_clause = 'LIMIT {}'.format(limit) if limit else '' order_by_clause = '' if order_by: l = [] for field, desc in order_by: l.append(field + ' DESC' if desc else '') order_by_clause = 'ORDER BY ' + ', '.join(l) where_clause = '' if filters: l = [] for field, value in filters.items(): l.append(f"{field} = '{value}'") where_clause = 'WHERE ' + ' AND '.join(l) sql = textwrap.dedent(f"""\ SHOW PARTITIONS FROM {table_name} {where_clause} {order_by_clause} {limit_clause} """) return sql @classmethod def where_latest_partition( cls, table_name, schema, database, qry, columns=None): try: col_name, value = cls.latest_partition( table_name, schema, database, show_first=True) except Exception: return False for c in columns: if c.get('name') == col_name: return qry.where(Column(col_name) == value) return False @classmethod def _latest_partition_from_df(cls, df): recs = df.to_records(index=False) if recs: return recs[0][0] @classmethod def latest_partition(cls, table_name, schema, database, show_first=False): indexes = database.get_indexes(table_name, schema) if len(indexes[0]['column_names']) < 1: raise SupersetTemplateException( 
'The table should have one partitioned field') elif not show_first and len(indexes[0]['column_names']) > 1: raise SupersetTemplateException( 'The table should have a single partitioned field ' 'to use this function. You may want to use ' '`presto.latest_sub_partition`') part_field = indexes[0]['column_names'][0] sql = cls._partition_query(table_name, 1, [(part_field, True)]) df = database.get_df(sql, schema) return part_field, cls._latest_partition_from_df(df) @classmethod def latest_sub_partition(cls, table_name, schema, database, **kwargs): indexes = database.get_indexes(table_name, schema) part_fields = indexes[0]['column_names'] for k in kwargs.keys(): if k not in k in part_fields: msg = 'Field [{k}] is not part of the portioning key' raise SupersetTemplateException(msg) if len(kwargs.keys()) != len(part_fields) - 1: msg = ( 'A filter needs to be specified for {} out of the ' '{} fields.' ).format(len(part_fields) - 1, len(part_fields)) raise SupersetTemplateException(msg) for field in part_fields: if field not in kwargs.keys(): field_to_return = field sql = cls._partition_query( table_name, 1, [(field_to_return, True)], kwargs) df = database.get_df(sql, schema) if df.empty: return '' return df.to_dict()[field_to_return][0] class HiveEngineSpec(PrestoEngineSpec): engine = 'hive' jobs_stats_r = re.compile( r'.*INFO.*Total jobs = (?P<max_jobs>[0-9]+)') launching_job_r = re.compile( '.*INFO.*Launching Job (?P<job_number>[0-9]+) out of ' '(?P<max_jobs>[0-9]+)') stage_progress_r = re.compile( r'.*INFO.*Stage-(?P<stage_number>[0-9]+).*' r'map = (?P<map_progress>[0-9]+)%.*' r'reduce = (?P<reduce_progress>[0-9]+)%.*') @classmethod def patch(cls): from pyhive import hive from superset.db_engines import hive as patched_hive from TCLIService import ( constants as patched_constants, ttypes as patched_ttypes, TCLIService as patched_TCLIService) hive.TCLIService = patched_TCLIService hive.constants = patched_constants hive.ttypes = patched_ttypes hive.Cursor.fetch_logs = patched_hive.fetch_logs @classmethod def fetch_result_sets(cls, db, datasource_type): return BaseEngineSpec.fetch_result_sets( db, datasource_type) @classmethod def fetch_data(cls, cursor, limit): import pyhive from TCLIService import ttypes state = cursor.poll() if state.operationState == ttypes.TOperationState.ERROR_STATE: raise Exception('Query error', state.errorMessage) try: return super(HiveEngineSpec, cls).fetch_data(cursor, limit) except pyhive.exc.ProgrammingError: return [] @staticmethod def create_table_from_csv(form, table): def convert_to_hive_type(col_type): tableschema_to_hive_types = { 'boolean': 'BOOLEAN', 'integer': 'INT', 'number': 'DOUBLE', 'string': 'STRING', } return tableschema_to_hive_types.get(col_type, 'STRING') bucket_path = config['CSV_TO_HIVE_UPLOAD_S3_BUCKET'] if not bucket_path: logging.info('No upload bucket specified') raise Exception( 'No upload bucket specified. You can specify one in the config file.') table_name = form.name.data schema_name = form.schema.data if config.get('UPLOADED_CSV_HIVE_NAMESPACE'): if '.' in table_name or schema_name: raise Exception( "You can't specify a namespace. " 'All tables will be uploaded to the `{}` namespace'.format( config.get('HIVE_NAMESPACE'))) full_table_name = '{}.{}'.format( config.get('UPLOADED_CSV_HIVE_NAMESPACE'), table_name) else: if '.' in table_name and schema_name: raise Exception( "You can't specify a namespace both in the name of the table " 'and in the schema field. 
Please remove one') full_table_name = '{}.{}'.format( schema_name, table_name) if schema_name else table_name filename = form.csv_file.data.filename upload_prefix = config['CSV_TO_HIVE_UPLOAD_DIRECTORY'] upload_path = config['UPLOAD_FOLDER'] + \ secure_filename(filename) from tableschema import Table hive_table_schema = Table(upload_path).infer() column_name_and_type = [] for column_info in hive_table_schema['fields']: column_name_and_type.append( '`{}` {}'.format( column_info['name'], convert_to_hive_type(column_info['type']))) schema_definition = ', '.join(column_name_and_type) import boto3 s3 = boto3.client('s3') location = os.path.join('s3a://', bucket_path, upload_prefix, table_name) s3.upload_file( upload_path, bucket_path, os.path.join(upload_prefix, table_name, filename)) sql = f"""CREATE TABLE {full_table_name} ( {schema_definition} ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE LOCATION '{location}' tblproperties ('skip.header.line.count'='1')""" logging.info(form.con.data) engine = create_engine(form.con.data.sqlalchemy_uri_decrypted) engine.execute(sql) @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "CAST('{}' AS DATE)".format(dttm.isoformat()[:10]) elif tt == 'TIMESTAMP': return "CAST('{}' AS TIMESTAMP)".format( dttm.strftime('%Y-%m-%d %H:%M:%S')) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod def adjust_database_uri(cls, uri, selected_schema=None): if selected_schema: uri.database = selected_schema return uri @classmethod def extract_error_message(cls, e): msg = str(e) match = re.search(r'errorMessage="(.*?)(?<!\\)"', msg) if match: msg = match.group(1) return msg @classmethod def progress(cls, log_lines): total_jobs = 1 current_job = 1 stages = {} for line in log_lines: match = cls.jobs_stats_r.match(line) if match: total_jobs = int(match.groupdict()['max_jobs']) or 1 match = cls.launching_job_r.match(line) if match: current_job = int(match.groupdict()['job_number']) total_jobs = int(match.groupdict()['max_jobs']) or 1 stages = {} match = cls.stage_progress_r.match(line) if match: stage_number = int(match.groupdict()['stage_number']) map_progress = int(match.groupdict()['map_progress']) reduce_progress = int(match.groupdict()['reduce_progress']) stages[stage_number] = (map_progress + reduce_progress) / 2 logging.info( 'Progress detail: {}, ' 'current job {}, ' 'total jobs: {}'.format(stages, current_job, total_jobs)) stage_progress = sum( stages.values()) / len(stages.values()) if stages else 0 progress = ( 100 * (current_job - 1) / total_jobs + stage_progress / total_jobs ) return int(progress) @classmethod def get_tracking_url(cls, log_lines): lkp = 'Tracking URL = ' for line in log_lines: if lkp in line: return line.split(lkp)[1] @classmethod def handle_cursor(cls, cursor, query, session): from pyhive import hive # pylint: disable=no-name-in-module unfinished_states = ( hive.ttypes.TOperationState.INITIALIZED_STATE, hive.ttypes.TOperationState.RUNNING_STATE, ) polled = cursor.poll() last_log_line = 0 tracking_url = None job_id = None while polled.operationState in unfinished_states: query = session.query(type(query)).filter_by(id=query.id).one() if query.status == QueryStatus.STOPPED: cursor.cancel() break log = cursor.fetch_logs() or '' if log: log_lines = log.splitlines() progress = cls.progress(log_lines) logging.info('Progress total: {}'.format(progress)) needs_commit = False if progress > query.progress: query.progress = progress needs_commit = True if not 
tracking_url: tracking_url = cls.get_tracking_url(log_lines) if tracking_url: job_id = tracking_url.split('/')[-2] logging.info( 'Found the tracking url: {}'.format(tracking_url)) tracking_url = tracking_url_trans(tracking_url) logging.info( 'Transformation applied: {}'.format(tracking_url)) query.tracking_url = tracking_url logging.info('Job id: {}'.format(job_id)) needs_commit = True if job_id and len(log_lines) > last_log_line: # Wait for job id before logging things out # this allows for prefixing all log lines and becoming # searchable in something like Kibana for l in log_lines[last_log_line:]: logging.info('[{}] {}'.format(job_id, l)) last_log_line = len(log_lines) if needs_commit: session.commit() time.sleep(hive_poll_interval) polled = cursor.poll() @classmethod def where_latest_partition( cls, table_name, schema, database, qry, columns=None): try: col_name, value = cls.latest_partition( table_name, schema, database, show_first=True) except Exception: # table is not partitioned return False for c in columns: if c.get('name') == col_name: return qry.where(Column(col_name) == value) return False @classmethod def latest_sub_partition(cls, table_name, schema, database, **kwargs): # TODO(bogdan): implement` pass @classmethod def _latest_partition_from_df(cls, df): return df.ix[:, 0].max().split('=')[1] @classmethod def _partition_query( cls, table_name, limit=0, order_by=None, filters=None): return f'SHOW PARTITIONS {table_name}' @classmethod def modify_url_for_impersonation(cls, url, impersonate_user, username): # Do nothing in the URL object since instead this should modify # the configuraiton dictionary. See get_configuration_for_impersonation pass @classmethod def get_configuration_for_impersonation(cls, uri, impersonate_user, username): configuration = {} url = make_url(uri) backend_name = url.get_backend_name() # Must be Hive connection, enable impersonation, and set param auth=LDAP|KERBEROS if (backend_name == 'hive' and 'auth' in url.query.keys() and impersonate_user is True and username is not None): configuration['hive.server2.proxy.user'] = username return configuration @staticmethod def execute(cursor, query, async_=False): kwargs = {'async': async_} cursor.execute(query, **kwargs) class MssqlEngineSpec(BaseEngineSpec): engine = 'mssql' epoch_to_dttm = "dateadd(S, {col}, '1970-01-01')" limit_method = LimitMethod.WRAP_SQL time_grain_functions = { None: '{col}', 'PT1S': "DATEADD(second, DATEDIFF(second, '2000-01-01', {col}), '2000-01-01')", 'PT1M': 'DATEADD(minute, DATEDIFF(minute, 0, {col}), 0)', 'PT5M': 'DATEADD(minute, DATEDIFF(minute, 0, {col}) / 5 * 5, 0)', 'PT10M': 'DATEADD(minute, DATEDIFF(minute, 0, {col}) / 10 * 10, 0)', 'PT15M': 'DATEADD(minute, DATEDIFF(minute, 0, {col}) / 15 * 15, 0)', 'PT0.5H': 'DATEADD(minute, DATEDIFF(minute, 0, {col}) / 30 * 30, 0)', 'PT1H': 'DATEADD(hour, DATEDIFF(hour, 0, {col}), 0)', 'P1D': 'DATEADD(day, DATEDIFF(day, 0, {col}), 0)', 'P1W': 'DATEADD(week, DATEDIFF(week, 0, {col}), 0)', 'P1M': 'DATEADD(month, DATEDIFF(month, 0, {col}), 0)', 'P0.25Y': 'DATEADD(quarter, DATEDIFF(quarter, 0, {col}), 0)', 'P1Y': 'DATEADD(year, DATEDIFF(year, 0, {col}), 0)', } @classmethod def convert_dttm(cls, target_type, dttm): return "CONVERT(DATETIME, '{}', 126)".format(dttm.isoformat()) @classmethod def fetch_data(cls, cursor, limit): data = super(MssqlEngineSpec, cls).fetch_data(cursor, limit) if len(data) != 0 and type(data[0]).__name__ == 'Row': data = [[elem for elem in r] for r in data] return data class AthenaEngineSpec(BaseEngineSpec): engine = 
'awsathena' time_grain_functions = { None: '{col}', 'PT1S': "date_trunc('second', CAST({col} AS TIMESTAMP))", 'PT1M': "date_trunc('minute', CAST({col} AS TIMESTAMP))", 'PT1H': "date_trunc('hour', CAST({col} AS TIMESTAMP))", 'P1D': "date_trunc('day', CAST({col} AS TIMESTAMP))", 'P1W': "date_trunc('week', CAST({col} AS TIMESTAMP))", 'P1M': "date_trunc('month', CAST({col} AS TIMESTAMP))", 'P0.25Y': "date_trunc('quarter', CAST({col} AS TIMESTAMP))", 'P1Y': "date_trunc('year', CAST({col} AS TIMESTAMP))", 'P1W/1970-01-03T00:00:00Z': "date_add('day', 5, date_trunc('week', \ date_add('day', 1, CAST({col} AS TIMESTAMP))))", '1969-12-28T00:00:00Z/P1W': "date_add('day', -1, date_trunc('week', \ date_add('day', 1, CAST({col} AS TIMESTAMP))))", } @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "from_iso8601_date('{}')".format(dttm.isoformat()[:10]) if tt == 'TIMESTAMP': return "from_iso8601_timestamp('{}')".format(dttm.isoformat()) return ("CAST ('{}' AS TIMESTAMP)" .format(dttm.strftime('%Y-%m-%d %H:%M:%S'))) @classmethod def epoch_to_dttm(cls): return 'from_unixtime({col})' class ClickHouseEngineSpec(BaseEngineSpec): engine = 'clickhouse' time_secondary_columns = True time_groupby_inline = True time_grain_functions = { None: '{col}', 'PT1M': 'toStartOfMinute(toDateTime({col}))', 'PT5M': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 300)*300)', 'PT10M': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 600)*600)', 'PT15M': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 900)*900)', 'PT0.5H': 'toDateTime(intDiv(toUInt32(toDateTime({col})), 1800)*1800)', 'PT1H': 'toStartOfHour(toDateTime({col}))', 'P1D': 'toStartOfDay(toDateTime({col}))', 'P1W': 'toMonday(toDateTime({col}))', 'P1M': 'toStartOfMonth(toDateTime({col}))', 'P0.25Y': 'toStartOfQuarter(toDateTime({col}))', 'P1Y': 'toStartOfYear(toDateTime({col}))', } @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "toDate('{}')".format(dttm.strftime('%Y-%m-%d')) if tt == 'DATETIME': return "toDateTime('{}')".format( dttm.strftime('%Y-%m-%d %H:%M:%S')) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) class BQEngineSpec(BaseEngineSpec): engine = 'bigquery' arraysize = 5000 time_grain_functions = { None: '{col}', 'PT1S': 'TIMESTAMP_TRUNC({col}, SECOND)', 'PT1M': 'TIMESTAMP_TRUNC({col}, MINUTE)', 'PT1H': 'TIMESTAMP_TRUNC({col}, HOUR)', 'P1D': 'TIMESTAMP_TRUNC({col}, DAY)', 'P1W': 'TIMESTAMP_TRUNC({col}, WEEK)', 'P1M': 'TIMESTAMP_TRUNC({col}, MONTH)', 'P0.25Y': 'TIMESTAMP_TRUNC({col}, QUARTER)', 'P1Y': 'TIMESTAMP_TRUNC({col}, YEAR)', } @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "'{}'".format(dttm.strftime('%Y-%m-%d')) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod def fetch_data(cls, cursor, limit): data = super(BQEngineSpec, cls).fetch_data(cursor, limit) if len(data) != 0 and type(data[0]).__name__ == 'Row': data = [r.values() for r in data] return data @staticmethod def mutate_label(label): hashed_label = '_' + hashlib.md5(label.encode('utf-8')).hexdigest() # if label starts with number, add underscore as first character mutated_label = '_' + label if re.match(r'^\d', label) else label # replace non-alphanumeric characters with underscores mutated_label = re.sub(r'[^\w]+', '_', mutated_label) if mutated_label != label: # add md5 hash to label to avoid possible collisions mutated_label += hashed_label # return only hash if length of final label exceeds 128 chars 
return mutated_label if len(mutated_label) <= 128 else hashed_label @classmethod def extra_table_metadata(cls, database, table_name, schema_name): indexes = database.get_indexes(table_name, schema_name) if not indexes: return {} partitions_columns = [ index.get('column_names', []) for index in indexes if index.get('name') == 'partition' ] cluster_columns = [ index.get('column_names', []) for index in indexes if index.get('name') == 'clustering' ] return { 'partitions': { 'cols': partitions_columns, }, 'clustering': { 'cols': cluster_columns, }, } @classmethod def _get_fields(cls, cols): return [sqla.literal_column(c.get('name')).label(c.get('name').replace('.', '__')) for c in cols] class ImpalaEngineSpec(BaseEngineSpec): engine = 'impala' time_grain_functions = { None: '{col}', 'PT1M': "TRUNC({col}, 'MI')", 'PT1H': "TRUNC({col}, 'HH')", 'P1D': "TRUNC({col}, 'DD')", 'P1W': "TRUNC({col}, 'WW')", 'P1M': "TRUNC({col}, 'MONTH')", 'P0.25Y': "TRUNC({col}, 'Q')", 'P1Y': "TRUNC({col}, 'YYYY')", } @classmethod def epoch_to_dttm(cls): return 'from_unixtime({col})' @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "'{}'".format(dttm.strftime('%Y-%m-%d')) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) @classmethod def get_schema_names(cls, inspector): schemas = [row[0] for row in inspector.engine.execute('SHOW SCHEMAS') if not row[0].startswith('_')] return schemas class DruidEngineSpec(BaseEngineSpec): engine = 'druid' inner_joins = False allows_subquery = False time_grain_functions = { None: '{col}', 'PT1S': 'FLOOR({col} TO SECOND)', 'PT1M': 'FLOOR({col} TO MINUTE)', 'PT1H': 'FLOOR({col} TO HOUR)', 'P1D': 'FLOOR({col} TO DAY)', 'P1W': 'FLOOR({col} TO WEEK)', 'P1M': 'FLOOR({col} TO MONTH)', 'P0.25Y': 'FLOOR({col} TO QUARTER)', 'P1Y': 'FLOOR({col} TO YEAR)', } class GSheetsEngineSpec(SqliteEngineSpec): engine = 'gsheets' inner_joins = False allows_subquery = False class KylinEngineSpec(BaseEngineSpec): engine = 'kylin' time_grain_functions = { None: '{col}', 'PT1S': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO SECOND) AS TIMESTAMP)', 'PT1M': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MINUTE) AS TIMESTAMP)', 'PT1H': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO HOUR) AS TIMESTAMP)', 'P1D': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO DAY) AS DATE)', 'P1W': 'CAST(TIMESTAMPADD(WEEK, WEEK(CAST({col} AS DATE)) - 1, \ FLOOR(CAST({col} AS TIMESTAMP) TO YEAR)) AS DATE)', 'P1M': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MONTH) AS DATE)', 'P0.25Y': 'CAST(TIMESTAMPADD(QUARTER, QUARTER(CAST({col} AS DATE)) - 1, \ FLOOR(CAST({col} AS TIMESTAMP) TO YEAR)) AS DATE)', 'P1Y': 'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO YEAR) AS DATE)', } @classmethod def convert_dttm(cls, target_type, dttm): tt = target_type.upper() if tt == 'DATE': return "CAST('{}' AS DATE)".format(dttm.isoformat()[:10]) if tt == 'TIMESTAMP': return "CAST('{}' AS TIMESTAMP)".format( dttm.strftime('%Y-%m-%d %H:%M:%S')) return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S')) class TeradataEngineSpec(BaseEngineSpec): engine = 'teradata' limit_method = LimitMethod.WRAP_SQL time_grain_functions = { None: '{col}', 'PT1M': "TRUNC(CAST({col} as DATE), 'MI')", 'PT1H': "TRUNC(CAST({col} as DATE), 'HH')", 'P1D': "TRUNC(CAST({col} as DATE), 'DDD')", 'P1W': "TRUNC(CAST({col} as DATE), 'WW')", 'P1M': "TRUNC(CAST({col} as DATE), 'MONTH')", 'P0.25Y': "TRUNC(CAST({col} as DATE), 'Q')", 'P1Y': "TRUNC(CAST({col} as DATE), 'YEAR')", } engines = { o.engine: o for o in globals().values() if inspect.isclass(o) and issubclass(o, 
BaseEngineSpec)}
true
true
f724783584f78b08b18463f09ec4dd4c262a7666
2,714
py
Python
lib/metrics.py
ppmdatix/rtdl
a01ecd9ae6b673f4e82e51f804ffd7031c7350a0
[ "Apache-2.0" ]
298
2021-06-22T15:41:18.000Z
2022-03-09T07:52:30.000Z
lib/metrics.py
ppmdatix/rtdl
a01ecd9ae6b673f4e82e51f804ffd7031c7350a0
[ "Apache-2.0" ]
15
2021-07-27T05:39:21.000Z
2022-02-25T11:33:32.000Z
lib/metrics.py
ppmdatix/rtdl
a01ecd9ae6b673f4e82e51f804ffd7031c7350a0
[ "Apache-2.0" ]
37
2021-06-25T03:56:37.000Z
2022-03-10T11:07:51.000Z
import typing as ty

import numpy as np
import scipy.special
import sklearn.metrics as skm

from . import util


def calculate_metrics(
    task_type: str,
    y: np.ndarray,
    prediction: np.ndarray,
    classification_mode: str,
    y_info: ty.Optional[ty.Dict[str, ty.Any]],
) -> ty.Dict[str, float]:
    if task_type == util.REGRESSION:
        del classification_mode
        rmse = skm.mean_squared_error(y, prediction) ** 0.5  # type: ignore[code]
        if y_info:
            if y_info['policy'] == 'mean_std':
                rmse *= y_info['std']
            else:
                assert False
        return {'rmse': rmse, 'score': -rmse}
    else:
        assert task_type in (util.BINCLASS, util.MULTICLASS)
        labels = None
        if classification_mode == 'probs':
            probs = prediction
        elif classification_mode == 'logits':
            probs = (
                scipy.special.expit(prediction)
                if task_type == util.BINCLASS
                else scipy.special.softmax(prediction, axis=1)
            )
        else:
            assert classification_mode == 'labels'
            probs = None
            labels = prediction
        if labels is None:
            labels = (
                np.round(probs).astype('int64')
                if task_type == util.BINCLASS
                else probs.argmax(axis=1)  # type: ignore[code]
            )
        result = skm.classification_report(y, labels, output_dict=True)  # type: ignore[code]
        if task_type == util.BINCLASS:
            result['roc_auc'] = skm.roc_auc_score(y, probs)  # type: ignore[code]
        result['score'] = result['accuracy']  # type: ignore[code]
        return result  # type: ignore[code]


def make_summary(metrics: ty.Dict[str, ty.Any]) -> str:
    precision = 3
    summary = {}
    for k, v in metrics.items():
        if k.isdigit():
            continue
        k = {
            'score': 'SCORE',
            'accuracy': 'acc',
            'roc_auc': 'roc_auc',
            'macro avg': 'm',
            'weighted avg': 'w',
        }.get(k, k)
        if isinstance(v, float):
            v = round(v, precision)
            summary[k] = v
        else:
            v = {
                {'precision': 'p', 'recall': 'r', 'f1-score': 'f1', 'support': 's'}.get(
                    x, x
                ): round(v[x], precision)
                for x in v
            }
            for item in v.items():
                summary[k + item[0]] = item[1]
    s = [f'score = {summary.pop("SCORE"):.3f}']
    for k, v in summary.items():
        if k not in ['mp', 'mr', 'wp', 'wr']:  # just to save screen space
            s.append(f'{k} = {v}')
    return ' | '.join(s)
31.55814
93
0.507369
import typing as ty

import numpy as np
import scipy.special
import sklearn.metrics as skm

from . import util


def calculate_metrics(
    task_type: str,
    y: np.ndarray,
    prediction: np.ndarray,
    classification_mode: str,
    y_info: ty.Optional[ty.Dict[str, ty.Any]],
) -> ty.Dict[str, float]:
    if task_type == util.REGRESSION:
        del classification_mode
        rmse = skm.mean_squared_error(y, prediction) ** 0.5
        if y_info:
            if y_info['policy'] == 'mean_std':
                rmse *= y_info['std']
            else:
                assert False
        return {'rmse': rmse, 'score': -rmse}
    else:
        assert task_type in (util.BINCLASS, util.MULTICLASS)
        labels = None
        if classification_mode == 'probs':
            probs = prediction
        elif classification_mode == 'logits':
            probs = (
                scipy.special.expit(prediction)
                if task_type == util.BINCLASS
                else scipy.special.softmax(prediction, axis=1)
            )
        else:
            assert classification_mode == 'labels'
            probs = None
            labels = prediction
        if labels is None:
            labels = (
                np.round(probs).astype('int64')
                if task_type == util.BINCLASS
                else probs.argmax(axis=1)
            )
        result = skm.classification_report(y, labels, output_dict=True)
        if task_type == util.BINCLASS:
            result['roc_auc'] = skm.roc_auc_score(y, probs)
        result['score'] = result['accuracy']
        return result


def make_summary(metrics: ty.Dict[str, ty.Any]) -> str:
    precision = 3
    summary = {}
    for k, v in metrics.items():
        if k.isdigit():
            continue
        k = {
            'score': 'SCORE',
            'accuracy': 'acc',
            'roc_auc': 'roc_auc',
            'macro avg': 'm',
            'weighted avg': 'w',
        }.get(k, k)
        if isinstance(v, float):
            v = round(v, precision)
            summary[k] = v
        else:
            v = {
                {'precision': 'p', 'recall': 'r', 'f1-score': 'f1', 'support': 's'}.get(
                    x, x
                ): round(v[x], precision)
                for x in v
            }
            for item in v.items():
                summary[k + item[0]] = item[1]
    s = [f'score = {summary.pop("SCORE"):.3f}']
    for k, v in summary.items():
        if k not in ['mp', 'mr', 'wp', 'wr']:
            s.append(f'{k} = {v}')
    return ' | '.join(s)
true
true
f724797778a03ff70b6b2d2cacc77e8f0dc791c8
1,451
py
Python
api/tests.py
toast38coza/KongOAuth
827d6f0cb47c67903f0a0236f56cd20c18bb84bb
[ "MIT" ]
null
null
null
api/tests.py
toast38coza/KongOAuth
827d6f0cb47c67903f0a0236f56cd20c18bb84bb
[ "MIT" ]
null
null
null
api/tests.py
toast38coza/KongOAuth
827d6f0cb47c67903f0a0236f56cd20c18bb84bb
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.test import TestCase, override_settings
from django.contrib.auth import get_user_model
import json
import responses


## responses:

def kong_login_success():
    responses.add(
        responses.POST,
        'https://kong:8443/test/oauth2/token',
        body=json.dumps({'refresh_token': 'rtoken', 'token_type': 'bearer', 'access_token': 'atoken', 'expires_in': 7200}),
        status=200,
        content_type='application/json'
    )


class OAuthTestCase(TestCase):

    def login(self, data):
        return self.client.post('/oauth2/token/', json.dumps(data), content_type="application/json")

    @override_settings(KONG_GATEWAY_URL='https://kong:8443')
    @responses.activate
    def setUp(self):
        self.user = get_user_model().objects.create_user(username='admin', password='testtest1234')
        data = {
            "username": "admin",
            "password": "testtest1234",
            "client_id": "cliendid",
            "client_secret": "secret"
        }
        kong_login_success()
        self.result = self.login(data)

    def test_is_ok(self):
        assert self.result.status_code == 200

    @responses.activate  # assert no response is made
    def test_invalid_login_returns_401(self):
        data = {
            "username": "foo",
            "password": "bar",
        }
        result = self.login(data)
        assert result.status_code == 401
29.612245
123
0.637491
from __future__ import unicode_literals

from django.test import TestCase, override_settings
from django.contrib.auth import get_user_model
import json
import responses


def kong_login_success():
    responses.add(
        responses.POST,
        'https://kong:8443/test/oauth2/token',
        body=json.dumps({'refresh_token': 'rtoken', 'token_type': 'bearer', 'access_token': 'atoken', 'expires_in': 7200}),
        status=200,
        content_type='application/json'
    )


class OAuthTestCase(TestCase):

    def login(self, data):
        return self.client.post('/oauth2/token/', json.dumps(data), content_type="application/json")

    @override_settings(KONG_GATEWAY_URL='https://kong:8443')
    @responses.activate
    def setUp(self):
        self.user = get_user_model().objects.create_user(username='admin', password='testtest1234')
        data = {
            "username": "admin",
            "password": "testtest1234",
            "client_id": "cliendid",
            "client_secret": "secret"
        }
        kong_login_success()
        self.result = self.login(data)

    def test_is_ok(self):
        assert self.result.status_code == 200

    @responses.activate
    def test_invalid_login_returns_401(self):
        data = {
            "username": "foo",
            "password": "bar",
        }
        result = self.login(data)
        assert result.status_code == 401
true
true
f7247a8886b4c59ba58b43b090bbe0a5d941f51a
9,304
py
Python
monai/transforms/__init__.py
marksgraham/MONAI
42591511e9493fedd70af857344cc91073b867e8
[ "Apache-2.0" ]
null
null
null
monai/transforms/__init__.py
marksgraham/MONAI
42591511e9493fedd70af857344cc91073b867e8
[ "Apache-2.0" ]
null
null
null
monai/transforms/__init__.py
marksgraham/MONAI
42591511e9493fedd70af857344cc91073b867e8
[ "Apache-2.0" ]
1
2021-01-19T19:35:00.000Z
2021-01-19T19:35:00.000Z
# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .adaptors import FunctionSignature, adaptor, apply_alias, to_kwargs from .compose import Compose from .croppad.array import ( BorderPad, BoundingRect, CenterSpatialCrop, CropForeground, DivisiblePad, RandCropByPosNegLabel, RandSpatialCrop, RandSpatialCropSamples, RandWeightedCrop, ResizeWithPadOrCrop, SpatialCrop, SpatialPad, ) from .croppad.batch import PadListDataCollate from .croppad.dictionary import ( BorderPadd, BorderPadD, BorderPadDict, BoundingRectd, BoundingRectD, BoundingRectDict, CenterSpatialCropd, CenterSpatialCropD, CenterSpatialCropDict, CropForegroundd, CropForegroundD, CropForegroundDict, DivisiblePadd, DivisiblePadD, DivisiblePadDict, NumpyPadModeSequence, RandCropByPosNegLabeld, RandCropByPosNegLabelD, RandCropByPosNegLabelDict, RandSpatialCropd, RandSpatialCropD, RandSpatialCropDict, RandSpatialCropSamplesd, RandSpatialCropSamplesD, RandSpatialCropSamplesDict, RandWeightedCropd, RandWeightedCropD, RandWeightedCropDict, ResizeWithPadOrCropd, ResizeWithPadOrCropD, ResizeWithPadOrCropDict, SpatialCropd, SpatialCropD, SpatialCropDict, SpatialPadd, SpatialPadD, SpatialPadDict, ) from .intensity.array import ( AdjustContrast, DetectEnvelope, GaussianSharpen, GaussianSmooth, MaskIntensity, NormalizeIntensity, RandAdjustContrast, RandBiasField, RandGaussianNoise, RandGaussianSharpen, RandGaussianSmooth, RandHistogramShift, RandRicianNoise, RandScaleIntensity, RandShiftIntensity, RandStdShiftIntensity, SavitzkyGolaySmooth, ScaleIntensity, ScaleIntensityRange, ScaleIntensityRangePercentiles, ShiftIntensity, StdShiftIntensity, ThresholdIntensity, ) from .intensity.dictionary import ( AdjustContrastd, AdjustContrastD, AdjustContrastDict, GaussianSharpend, GaussianSharpenD, GaussianSharpenDict, GaussianSmoothd, GaussianSmoothD, GaussianSmoothDict, MaskIntensityd, MaskIntensityD, MaskIntensityDict, NormalizeIntensityd, NormalizeIntensityD, NormalizeIntensityDict, RandAdjustContrastd, RandAdjustContrastD, RandAdjustContrastDict, RandBiasFieldd, RandBiasFieldD, RandBiasFieldDict, RandGaussianNoised, RandGaussianNoiseD, RandGaussianNoiseDict, RandGaussianSharpend, RandGaussianSharpenD, RandGaussianSharpenDict, RandGaussianSmoothd, RandGaussianSmoothD, RandGaussianSmoothDict, RandHistogramShiftd, RandHistogramShiftD, RandHistogramShiftDict, RandRicianNoised, RandRicianNoiseD, RandRicianNoiseDict, RandScaleIntensityd, RandScaleIntensityD, RandScaleIntensityDict, RandShiftIntensityd, RandShiftIntensityD, RandShiftIntensityDict, RandStdShiftIntensityd, RandStdShiftIntensityD, RandStdShiftIntensityDict, ScaleIntensityd, ScaleIntensityD, ScaleIntensityDict, ScaleIntensityRanged, ScaleIntensityRangeD, ScaleIntensityRangeDict, ScaleIntensityRangePercentilesd, ScaleIntensityRangePercentilesD, ScaleIntensityRangePercentilesDict, ShiftIntensityd, ShiftIntensityD, ShiftIntensityDict, StdShiftIntensityd, StdShiftIntensityD, StdShiftIntensityDict, ThresholdIntensityd, ThresholdIntensityD, ThresholdIntensityDict, ) from .inverse 
import InvertibleTransform from .io.array import LoadImage, SaveImage from .io.dictionary import LoadImaged, LoadImageD, LoadImageDict, SaveImaged, SaveImageD, SaveImageDict from .post.array import ( Activations, AsDiscrete, KeepLargestConnectedComponent, LabelToContour, MeanEnsemble, ProbNMS, VoteEnsemble, ) from .post.dictionary import ( Activationsd, ActivationsD, ActivationsDict, AsDiscreted, AsDiscreteD, AsDiscreteDict, Decollated, DecollateD, DecollateDict, Ensembled, KeepLargestConnectedComponentd, KeepLargestConnectedComponentD, KeepLargestConnectedComponentDict, LabelToContourd, LabelToContourD, LabelToContourDict, MeanEnsembled, MeanEnsembleD, MeanEnsembleDict, ProbNMSd, ProbNMSD, ProbNMSDict, VoteEnsembled, VoteEnsembleD, VoteEnsembleDict, ) from .spatial.array import ( Affine, AffineGrid, Flip, Orientation, Rand2DElastic, Rand3DElastic, RandAffine, RandAffineGrid, RandAxisFlip, RandDeformGrid, RandFlip, RandRotate, RandRotate90, RandZoom, Resample, Resize, Rotate, Rotate90, Spacing, Zoom, ) from .spatial.dictionary import ( Affined, AffineD, AffineDict, Flipd, FlipD, FlipDict, Orientationd, OrientationD, OrientationDict, Rand2DElasticd, Rand2DElasticD, Rand2DElasticDict, Rand3DElasticd, Rand3DElasticD, Rand3DElasticDict, RandAffined, RandAffineD, RandAffineDict, RandAxisFlipd, RandAxisFlipD, RandAxisFlipDict, RandFlipd, RandFlipD, RandFlipDict, RandRotate90d, RandRotate90D, RandRotate90Dict, RandRotated, RandRotateD, RandRotateDict, RandZoomd, RandZoomD, RandZoomDict, Resized, ResizeD, ResizeDict, Rotate90d, Rotate90D, Rotate90Dict, Rotated, RotateD, RotateDict, Spacingd, SpacingD, SpacingDict, Zoomd, ZoomD, ZoomDict, ) from .transform import MapTransform, Randomizable, RandomizableTransform, Transform, apply_transform from .utility.array import ( AddChannel, AddExtremePointsChannel, AsChannelFirst, AsChannelLast, CastToType, ConvertToMultiChannelBasedOnBratsClasses, DataStats, EnsureChannelFirst, FgBgToIndices, Identity, LabelToMask, Lambda, MapLabelValue, RemoveRepeatedChannel, RepeatChannel, SimulateDelay, SplitChannel, SqueezeDim, ToCupy, ToNumpy, ToPIL, TorchVision, ToTensor, Transpose, ) from .utility.dictionary import ( AddChanneld, AddChannelD, AddChannelDict, AddExtremePointsChanneld, AddExtremePointsChannelD, AddExtremePointsChannelDict, AsChannelFirstd, AsChannelFirstD, AsChannelFirstDict, AsChannelLastd, AsChannelLastD, AsChannelLastDict, CastToTyped, CastToTypeD, CastToTypeDict, ConcatItemsd, ConcatItemsD, ConcatItemsDict, ConvertToMultiChannelBasedOnBratsClassesd, ConvertToMultiChannelBasedOnBratsClassesD, ConvertToMultiChannelBasedOnBratsClassesDict, CopyItemsd, CopyItemsD, CopyItemsDict, DataStatsd, DataStatsD, DataStatsDict, DeleteItemsd, DeleteItemsD, DeleteItemsDict, EnsureChannelFirstd, EnsureChannelFirstD, EnsureChannelFirstDict, FgBgToIndicesd, FgBgToIndicesD, FgBgToIndicesDict, Identityd, IdentityD, IdentityDict, LabelToMaskd, LabelToMaskD, LabelToMaskDict, Lambdad, LambdaD, LambdaDict, MapLabelValued, MapLabelValueD, MapLabelValueDict, RandLambdad, RandLambdaD, RandLambdaDict, RandTorchVisiond, RandTorchVisionD, RandTorchVisionDict, RemoveRepeatedChanneld, RemoveRepeatedChannelD, RemoveRepeatedChannelDict, RepeatChanneld, RepeatChannelD, RepeatChannelDict, SelectItemsd, SelectItemsD, SelectItemsDict, SimulateDelayd, SimulateDelayD, SimulateDelayDict, SplitChanneld, SplitChannelD, SplitChannelDict, SqueezeDimd, SqueezeDimD, SqueezeDimDict, ToCupyd, ToCupyD, ToCupyDict, ToNumpyd, ToNumpyD, ToNumpyDict, ToPILd, ToPILD, ToPILDict, TorchVisiond, TorchVisionD, 
TorchVisionDict, ToTensord, ToTensorD, ToTensorDict, Transposed, TransposeD, TransposeDict, ) from .utils import ( allow_missing_keys_mode, compute_divisible_spatial_size, convert_inverse_interp_mode, copypaste_arrays, create_control_grid, create_grid, create_rotate, create_scale, create_shear, create_translate, extreme_points_to_image, generate_pos_neg_label_crop_centers, generate_spatial_bounding_box, get_extreme_points, get_largest_connected_component_mask, img_bounds, in_bounds, is_empty, is_positive, map_binary_to_indices, map_spatial_axes, rand_choice, rescale_array, rescale_array_int_max, rescale_instance_array, resize_center, weighted_patch_samples, zero_margins, )
22.258373
103
0.725064
from .adaptors import FunctionSignature, adaptor, apply_alias, to_kwargs from .compose import Compose from .croppad.array import ( BorderPad, BoundingRect, CenterSpatialCrop, CropForeground, DivisiblePad, RandCropByPosNegLabel, RandSpatialCrop, RandSpatialCropSamples, RandWeightedCrop, ResizeWithPadOrCrop, SpatialCrop, SpatialPad, ) from .croppad.batch import PadListDataCollate from .croppad.dictionary import ( BorderPadd, BorderPadD, BorderPadDict, BoundingRectd, BoundingRectD, BoundingRectDict, CenterSpatialCropd, CenterSpatialCropD, CenterSpatialCropDict, CropForegroundd, CropForegroundD, CropForegroundDict, DivisiblePadd, DivisiblePadD, DivisiblePadDict, NumpyPadModeSequence, RandCropByPosNegLabeld, RandCropByPosNegLabelD, RandCropByPosNegLabelDict, RandSpatialCropd, RandSpatialCropD, RandSpatialCropDict, RandSpatialCropSamplesd, RandSpatialCropSamplesD, RandSpatialCropSamplesDict, RandWeightedCropd, RandWeightedCropD, RandWeightedCropDict, ResizeWithPadOrCropd, ResizeWithPadOrCropD, ResizeWithPadOrCropDict, SpatialCropd, SpatialCropD, SpatialCropDict, SpatialPadd, SpatialPadD, SpatialPadDict, ) from .intensity.array import ( AdjustContrast, DetectEnvelope, GaussianSharpen, GaussianSmooth, MaskIntensity, NormalizeIntensity, RandAdjustContrast, RandBiasField, RandGaussianNoise, RandGaussianSharpen, RandGaussianSmooth, RandHistogramShift, RandRicianNoise, RandScaleIntensity, RandShiftIntensity, RandStdShiftIntensity, SavitzkyGolaySmooth, ScaleIntensity, ScaleIntensityRange, ScaleIntensityRangePercentiles, ShiftIntensity, StdShiftIntensity, ThresholdIntensity, ) from .intensity.dictionary import ( AdjustContrastd, AdjustContrastD, AdjustContrastDict, GaussianSharpend, GaussianSharpenD, GaussianSharpenDict, GaussianSmoothd, GaussianSmoothD, GaussianSmoothDict, MaskIntensityd, MaskIntensityD, MaskIntensityDict, NormalizeIntensityd, NormalizeIntensityD, NormalizeIntensityDict, RandAdjustContrastd, RandAdjustContrastD, RandAdjustContrastDict, RandBiasFieldd, RandBiasFieldD, RandBiasFieldDict, RandGaussianNoised, RandGaussianNoiseD, RandGaussianNoiseDict, RandGaussianSharpend, RandGaussianSharpenD, RandGaussianSharpenDict, RandGaussianSmoothd, RandGaussianSmoothD, RandGaussianSmoothDict, RandHistogramShiftd, RandHistogramShiftD, RandHistogramShiftDict, RandRicianNoised, RandRicianNoiseD, RandRicianNoiseDict, RandScaleIntensityd, RandScaleIntensityD, RandScaleIntensityDict, RandShiftIntensityd, RandShiftIntensityD, RandShiftIntensityDict, RandStdShiftIntensityd, RandStdShiftIntensityD, RandStdShiftIntensityDict, ScaleIntensityd, ScaleIntensityD, ScaleIntensityDict, ScaleIntensityRanged, ScaleIntensityRangeD, ScaleIntensityRangeDict, ScaleIntensityRangePercentilesd, ScaleIntensityRangePercentilesD, ScaleIntensityRangePercentilesDict, ShiftIntensityd, ShiftIntensityD, ShiftIntensityDict, StdShiftIntensityd, StdShiftIntensityD, StdShiftIntensityDict, ThresholdIntensityd, ThresholdIntensityD, ThresholdIntensityDict, ) from .inverse import InvertibleTransform from .io.array import LoadImage, SaveImage from .io.dictionary import LoadImaged, LoadImageD, LoadImageDict, SaveImaged, SaveImageD, SaveImageDict from .post.array import ( Activations, AsDiscrete, KeepLargestConnectedComponent, LabelToContour, MeanEnsemble, ProbNMS, VoteEnsemble, ) from .post.dictionary import ( Activationsd, ActivationsD, ActivationsDict, AsDiscreted, AsDiscreteD, AsDiscreteDict, Decollated, DecollateD, DecollateDict, Ensembled, KeepLargestConnectedComponentd, KeepLargestConnectedComponentD, KeepLargestConnectedComponentDict, 
LabelToContourd, LabelToContourD, LabelToContourDict, MeanEnsembled, MeanEnsembleD, MeanEnsembleDict, ProbNMSd, ProbNMSD, ProbNMSDict, VoteEnsembled, VoteEnsembleD, VoteEnsembleDict, ) from .spatial.array import ( Affine, AffineGrid, Flip, Orientation, Rand2DElastic, Rand3DElastic, RandAffine, RandAffineGrid, RandAxisFlip, RandDeformGrid, RandFlip, RandRotate, RandRotate90, RandZoom, Resample, Resize, Rotate, Rotate90, Spacing, Zoom, ) from .spatial.dictionary import ( Affined, AffineD, AffineDict, Flipd, FlipD, FlipDict, Orientationd, OrientationD, OrientationDict, Rand2DElasticd, Rand2DElasticD, Rand2DElasticDict, Rand3DElasticd, Rand3DElasticD, Rand3DElasticDict, RandAffined, RandAffineD, RandAffineDict, RandAxisFlipd, RandAxisFlipD, RandAxisFlipDict, RandFlipd, RandFlipD, RandFlipDict, RandRotate90d, RandRotate90D, RandRotate90Dict, RandRotated, RandRotateD, RandRotateDict, RandZoomd, RandZoomD, RandZoomDict, Resized, ResizeD, ResizeDict, Rotate90d, Rotate90D, Rotate90Dict, Rotated, RotateD, RotateDict, Spacingd, SpacingD, SpacingDict, Zoomd, ZoomD, ZoomDict, ) from .transform import MapTransform, Randomizable, RandomizableTransform, Transform, apply_transform from .utility.array import ( AddChannel, AddExtremePointsChannel, AsChannelFirst, AsChannelLast, CastToType, ConvertToMultiChannelBasedOnBratsClasses, DataStats, EnsureChannelFirst, FgBgToIndices, Identity, LabelToMask, Lambda, MapLabelValue, RemoveRepeatedChannel, RepeatChannel, SimulateDelay, SplitChannel, SqueezeDim, ToCupy, ToNumpy, ToPIL, TorchVision, ToTensor, Transpose, ) from .utility.dictionary import ( AddChanneld, AddChannelD, AddChannelDict, AddExtremePointsChanneld, AddExtremePointsChannelD, AddExtremePointsChannelDict, AsChannelFirstd, AsChannelFirstD, AsChannelFirstDict, AsChannelLastd, AsChannelLastD, AsChannelLastDict, CastToTyped, CastToTypeD, CastToTypeDict, ConcatItemsd, ConcatItemsD, ConcatItemsDict, ConvertToMultiChannelBasedOnBratsClassesd, ConvertToMultiChannelBasedOnBratsClassesD, ConvertToMultiChannelBasedOnBratsClassesDict, CopyItemsd, CopyItemsD, CopyItemsDict, DataStatsd, DataStatsD, DataStatsDict, DeleteItemsd, DeleteItemsD, DeleteItemsDict, EnsureChannelFirstd, EnsureChannelFirstD, EnsureChannelFirstDict, FgBgToIndicesd, FgBgToIndicesD, FgBgToIndicesDict, Identityd, IdentityD, IdentityDict, LabelToMaskd, LabelToMaskD, LabelToMaskDict, Lambdad, LambdaD, LambdaDict, MapLabelValued, MapLabelValueD, MapLabelValueDict, RandLambdad, RandLambdaD, RandLambdaDict, RandTorchVisiond, RandTorchVisionD, RandTorchVisionDict, RemoveRepeatedChanneld, RemoveRepeatedChannelD, RemoveRepeatedChannelDict, RepeatChanneld, RepeatChannelD, RepeatChannelDict, SelectItemsd, SelectItemsD, SelectItemsDict, SimulateDelayd, SimulateDelayD, SimulateDelayDict, SplitChanneld, SplitChannelD, SplitChannelDict, SqueezeDimd, SqueezeDimD, SqueezeDimDict, ToCupyd, ToCupyD, ToCupyDict, ToNumpyd, ToNumpyD, ToNumpyDict, ToPILd, ToPILD, ToPILDict, TorchVisiond, TorchVisionD, TorchVisionDict, ToTensord, ToTensorD, ToTensorDict, Transposed, TransposeD, TransposeDict, ) from .utils import ( allow_missing_keys_mode, compute_divisible_spatial_size, convert_inverse_interp_mode, copypaste_arrays, create_control_grid, create_grid, create_rotate, create_scale, create_shear, create_translate, extreme_points_to_image, generate_pos_neg_label_crop_centers, generate_spatial_bounding_box, get_extreme_points, get_largest_connected_component_mask, img_bounds, in_bounds, is_empty, is_positive, map_binary_to_indices, map_spatial_axes, rand_choice, 
rescale_array, rescale_array_int_max, rescale_instance_array, resize_center, weighted_patch_samples, zero_margins, )
true
true
f7247b2dbd9cf7eb773ad8e4856771996587a897
48
py
Python
samcli/__init__.py
kylelaker/aws-sam-cli
d2917102ef56ac05b9973f96c716612f9638bb62
[ "BSD-2-Clause", "Apache-2.0" ]
null
null
null
samcli/__init__.py
kylelaker/aws-sam-cli
d2917102ef56ac05b9973f96c716612f9638bb62
[ "BSD-2-Clause", "Apache-2.0" ]
null
null
null
samcli/__init__.py
kylelaker/aws-sam-cli
d2917102ef56ac05b9973f96c716612f9638bb62
[ "BSD-2-Clause", "Apache-2.0" ]
null
null
null
""" SAM CLI version """ __version__ = "1.13.1"
8
22
0.583333
__version__ = "1.13.1"
true
true
f7247b7c5f90f8592a8c662974c56a475935ed18
1,394
py
Python
505 The Maze II.py
krishna13052001/LeetCode
cd6ec626bea61f0bd9e8493622074f9e69a7a1c3
[ "MIT" ]
872
2015-06-15T12:02:41.000Z
2022-03-30T08:44:35.000Z
505 The Maze II.py
nadeemshaikh-github/LeetCode
3fb14aeea62a960442e47dfde9f964c7ffce32be
[ "MIT" ]
8
2015-06-21T15:11:59.000Z
2022-02-01T11:22:34.000Z
505 The Maze II.py
nadeemshaikh-github/LeetCode
3fb14aeea62a960442e47dfde9f964c7ffce32be
[ "MIT" ]
328
2015-06-28T03:10:35.000Z
2022-03-29T11:05:28.000Z
#!/usr/bin/python3
"""
premium question
"""
from typing import List
import heapq

dirs = [(0, -1), (0, 1), (-1, 0), (1, 0)]


class Solution:
    def shortestDistance(self, maze: List[List[int]], start: List[int], destination: List[int]) -> int:
        """
        No friction rolling ball
        F[i][j][dir] = min distance given direction
        S[i][j] = whether stoppable

        Dijkstra's algorith, reduce to a graph problem
        """
        m, n = len(maze), len(maze[0])
        D = [[float("inf") for _ in range(n)] for _ in range(m)]  # distance matrix
        i, j = start
        D[i][j] = 0
        q = [(0, i, j)]
        while q:
            dist, i, j = heapq.heappop(q)
            for di, dj in dirs:
                cur_dist = 0
                I = i
                J = j
                # look ahead
                while 0 <= I + di < m and 0 <= J + dj < n and maze[I + di][J + dj] == 0:
                    I += di
                    J += dj
                    cur_dist += 1

                if dist + cur_dist < D[I][J]:
                    D[I][J] = dist + cur_dist
                    heapq.heappush(q, (D[I][J], I, J))

        i, j = destination
        return D[i][j] if D[i][j] != float("inf") else -1


if __name__ == "__main__":
    assert Solution().shortestDistance([[0,0,1,0,0],[0,0,0,0,0],[0,0,0,1,0],[1,1,0,1,1],[0,0,0,0,0]], [0,4], [4,4]) == 12
28.44898
121
0.444763
from typing import List
import heapq

dirs = [(0, -1), (0, 1), (-1, 0), (1, 0)]


class Solution:
    def shortestDistance(self, maze: List[List[int]], start: List[int], destination: List[int]) -> int:
        m, n = len(maze), len(maze[0])
        D = [[float("inf") for _ in range(n)] for _ in range(m)]
        i, j = start
        D[i][j] = 0
        q = [(0, i, j)]
        while q:
            dist, i, j = heapq.heappop(q)
            for di, dj in dirs:
                cur_dist = 0
                I = i
                J = j
                while 0 <= I + di < m and 0 <= J + dj < n and maze[I + di][J + dj] == 0:
                    I += di
                    J += dj
                    cur_dist += 1

                if dist + cur_dist < D[I][J]:
                    D[I][J] = dist + cur_dist
                    heapq.heappush(q, (D[I][J], I, J))

        i, j = destination
        return D[i][j] if D[i][j] != float("inf") else -1


if __name__ == "__main__":
    assert Solution().shortestDistance([[0,0,1,0,0],[0,0,0,0,0],[0,0,0,1,0],[1,1,0,1,1],[0,0,0,0,0]], [0,4], [4,4]) == 12
true
true
f7247bcaf5d604756097f5b9a64dc5e8efcee241
1,801
py
Python
src/augmented_pickle.py
opskrift/expman
637bbef34d79ce03311889ce310797e78a9f7710
[ "MIT" ]
null
null
null
src/augmented_pickle.py
opskrift/expman
637bbef34d79ce03311889ce310797e78a9f7710
[ "MIT" ]
16
2021-08-04T12:08:00.000Z
2021-09-12T13:01:27.000Z
src/augmented_pickle.py
opskrift/opskrift
637bbef34d79ce03311889ce310797e78a9f7710
[ "MIT" ]
null
null
null
""" Suppose you have some input data sources `data_in` on which you apply some process `F` parameterized by `args`: data_out = F(data_in, args) You want to serialize `data_out`, but also don't want to lose `args`, to preserve the exact setup that generated the output data. Now suppose you want to inspect `args` for a particular `data_out`: - Saving both `{"data": data_out, "args": args}` may not be a viable solution, as `data_out` needs to be fully loaded into memory without actually needing it. - Saving `data_out` and `args` separately necessitates extra care to keep them tied together. Solution: define a simple data format -- *augmented pickle* <metadata> <body (actual data)> Pickle both objects, but read body on-demand: res = read_augmented_pickle("./data.apkl", get_body=True) # get metadata (body is not loaded) meta = next(res) # query the generator again to get body (data) data = next(res) """ import pickle from os import PathLike from typing import Any, Iterable, Union def write_augmented_pickle( metadata: Any, body: Any, path: Union[str, PathLike], ) -> None: """Write an augmented pickle file containing `metadata` and `body`.""" with open(path, "wb") as fp: pickle.dump(metadata, fp) pickle.dump(body, fp) def read_augmented_pickle( path: Union[str, PathLike], get_body: bool, ) -> Iterable[Any]: """Read an augmented pickle file containing `metadata` and `body`. Returns a generator that can be queried on-demand using `next`. If `get_body` is False, only `metadata` is yielded. """ with open(path, "rb") as fp: metadata = pickle.load(fp) yield metadata if not get_body: return body = pickle.load(fp) yield body
27.287879
111
0.675736
import pickle
from os import PathLike
from typing import Any, Iterable, Union


def write_augmented_pickle(
    metadata: Any,
    body: Any,
    path: Union[str, PathLike],
) -> None:
    with open(path, "wb") as fp:
        pickle.dump(metadata, fp)
        pickle.dump(body, fp)


def read_augmented_pickle(
    path: Union[str, PathLike],
    get_body: bool,
) -> Iterable[Any]:
    with open(path, "rb") as fp:
        metadata = pickle.load(fp)
        yield metadata
        if not get_body:
            return
        body = pickle.load(fp)
        yield body
true
true
f7247d937c3d515dd43275659017e44fc03cb44c
1,247
py
Python
crawling/crawler.py
LukasTinnes/sPyRat
42e012e426befa3876e590be2ea83874d5351d12
[ "Unlicense" ]
null
null
null
crawling/crawler.py
LukasTinnes/sPyRat
42e012e426befa3876e590be2ea83874d5351d12
[ "Unlicense" ]
3
2022-02-07T19:53:47.000Z
2022-02-13T19:51:33.000Z
crawling/crawler.py
LukasTinnes/sPyRat
42e012e426befa3876e590be2ea83874d5351d12
[ "Unlicense" ]
null
null
null
from abc import abstractmethod

from crawling.crawler_data_structures.crawl_data import CrawlData


class Crawler:
    """
    An abstract class for other Crawlers to inherit from.
    A Crawler should open a given file and attempt to find an associated file pattern at every byte in the given file.
    """

    @abstractmethod
    def crawl(self, file: str) -> CrawlData:
        """
        Crawls a file to find a certain file pattern at every byte.
        :param file: The file path
        :return:
        """
        ...

    @abstractmethod
    def crawl_in_range(self, file: str, start_byte: int, end_byte: int) -> CrawlData:
        """
        Crawls the file for a file pattern between the start byte (inclusive) and the end_byte (exclusive).
        :param file: Tjhe file path
        :param start_byte: The byte to start crawling at.
        :param end_byte: The byte to end crawling at.
        :return:
        """
        ...

    @abstractmethod
    def crawl_at_byte(self, file:str, start_byte: int = 0) -> CrawlData:
        """
        Crawls for a file pattern at the specific byte given.
        :param file: The filepath.
        :param start_byte: The byte to start crawling at.
        :return:
        """
        ...
31.175
118
0.623095
from abc import abstractmethod

from crawling.crawler_data_structures.crawl_data import CrawlData


class Crawler:

    @abstractmethod
    def crawl(self, file: str) -> CrawlData:
        ...

    @abstractmethod
    def crawl_in_range(self, file: str, start_byte: int, end_byte: int) -> CrawlData:
        ...

    @abstractmethod
    def crawl_at_byte(self, file:str, start_byte: int = 0) -> CrawlData:
        ...
true
true
f7247d9b14431ca407254cad9d929acd151162dc
9,493
py
Python
docs/conf.py
benedikt-mangold/obfuscate
4e51b3c3c6d3d869a742f036234632c77cebcb54
[ "MIT" ]
null
null
null
docs/conf.py
benedikt-mangold/obfuscate
4e51b3c3c6d3d869a742f036234632c77cebcb54
[ "MIT" ]
1
2021-06-01T14:48:09.000Z
2021-06-01T14:48:09.000Z
docs/conf.py
benedikt-mangold/obfuscate
4e51b3c3c6d3d869a742f036234632c77cebcb54
[ "MIT" ]
null
null
null
# This file is execfile()d with the current directory set to its containing dir. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys import inspect import shutil # -- Path setup -------------------------------------------------------------- __location__ = os.path.join( os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe())) ) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.join(__location__, "../src")) # -- Run sphinx-apidoc ------------------------------------------------------- # This hack is necessary since RTD does not issue `sphinx-apidoc` before running # `sphinx-build -b html . _build/html`. See Issue: # https://github.com/rtfd/readthedocs.org/issues/1139 # DON'T FORGET: Check the box "Install your project inside a virtualenv using # setup.py install" in the RTD Advanced Settings. # Additionally it helps us to avoid running apidoc manually try: # for Sphinx >= 1.7 from sphinx.ext import apidoc except ImportError: from sphinx import apidoc output_dir = os.path.join(__location__, "api") module_dir = os.path.join(__location__, "../src/obfuscator") try: shutil.rmtree(output_dir) except FileNotFoundError: pass try: import sphinx cmd_line_template = ( "sphinx-apidoc --implicit-namespaces -f -o {outputdir} {moduledir}" ) cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir) args = cmd_line.split(" ") if tuple(sphinx.__version__.split(".")) >= ("1", "7"): # This is a rudimentary parse_version to avoid external dependencies args = args[1:] apidoc.main(args) except Exception as e: print("Running `sphinx-apidoc` failed!\n{}".format(e)) # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.todo", "sphinx.ext.autosummary", "sphinx.ext.viewcode", "sphinx.ext.coverage", "sphinx.ext.doctest", "sphinx.ext.ifconfig", "sphinx.ext.mathjax", "sphinx.ext.napoleon", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix of source filenames. source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = "obfuscator" copyright = "2021, Benedikt Mangold" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = "" # Is set by calling `setup.py docs` # The full version, including alpha/beta/rc tags. release = "" # Is set by calling `setup.py docs` # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".venv"] # The reST default role (used for this markup: `text`) to use for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "sidebar_width": "300px", "page_width": "1200px" } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". try: from obfuscator import __version__ as version except ImportError: pass else: release = version # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = "" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." 
is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = "obfuscator-doc" # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ("letterpaper" or "a4paper"). # "papersize": "letterpaper", # The font size ("10pt", "11pt" or "12pt"). # "pointsize": "10pt", # Additional stuff for the LaTeX preamble. # "preamble": "", } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ("index", "user_guide.tex", "obfuscator Documentation", "Benedikt Mangold", "manual") ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = "" # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- External mapping -------------------------------------------------------- python_version = ".".join(map(str, sys.version_info[0:2])) intersphinx_mapping = { "sphinx": ("http://www.sphinx-doc.org/en/stable", None), "python": ("https://docs.python.org/" + python_version, None), "matplotlib": ("https://matplotlib.org", None), "numpy": ("https://docs.scipy.org/doc/numpy", None), "sklearn": ("https://scikit-learn.org/stable", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), "scipy": ("https://docs.scipy.org/doc/scipy/reference", None), "pyscaffold": ("https://pyscaffold.org/en/stable", None), }
33.54417
89
0.696092
import os import sys import inspect import shutil __location__ = os.path.join( os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe())) ) sys.path.insert(0, os.path.join(__location__, "../src")) # setup.py install" in the RTD Advanced Settings. # Additionally it helps us to avoid running apidoc manually try: # for Sphinx >= 1.7 from sphinx.ext import apidoc except ImportError: from sphinx import apidoc output_dir = os.path.join(__location__, "api") module_dir = os.path.join(__location__, "../src/obfuscator") try: shutil.rmtree(output_dir) except FileNotFoundError: pass try: import sphinx cmd_line_template = ( "sphinx-apidoc --implicit-namespaces -f -o {outputdir} {moduledir}" ) cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir) args = cmd_line.split(" ") if tuple(sphinx.__version__.split(".")) >= ("1", "7"): # This is a rudimentary parse_version to avoid external dependencies args = args[1:] apidoc.main(args) except Exception as e: print("Running `sphinx-apidoc` failed!\n{}".format(e)) # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.todo", "sphinx.ext.autosummary", "sphinx.ext.viewcode", "sphinx.ext.coverage", "sphinx.ext.doctest", "sphinx.ext.ifconfig", "sphinx.ext.mathjax", "sphinx.ext.napoleon", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix of source filenames. source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = "obfuscator" copyright = "2021, Benedikt Mangold" # The version info for the project you're documenting, acts as replacement for version = "" release = "" exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".venv"] pygments_style = "sphinx" html_theme = "alabaster" html_theme_options = { "sidebar_width": "300px", "page_width": "1200px" } try: from obfuscator import __version__ as version except ImportError: pass else: release = version html_static_path = ["_static"] htmlhelp_basename = "obfuscator-doc" latex_elements = { } latex_documents = [ ("index", "user_guide.tex", "obfuscator Documentation", "Benedikt Mangold", "manual") ] python_version = ".".join(map(str, sys.version_info[0:2])) intersphinx_mapping = { "sphinx": ("http://www.sphinx-doc.org/en/stable", None), "python": ("https://docs.python.org/" + python_version, None), "matplotlib": ("https://matplotlib.org", None), "numpy": ("https://docs.scipy.org/doc/numpy", None), "sklearn": ("https://scikit-learn.org/stable", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), "scipy": ("https://docs.scipy.org/doc/scipy/reference", None), "pyscaffold": ("https://pyscaffold.org/en/stable", None), }
true
true
f7247db16c62de200bc2c5deee5c7ca83220e585
4,738
py
Python
test/streams.py
gonsp/LotterySampling
92ff14f602c05d747708b522cf05b9f9066c43e0
[ "MIT" ]
4
2020-06-03T15:17:28.000Z
2020-09-29T20:52:15.000Z
test/streams.py
gonsp/LotterySampling
92ff14f602c05d747708b522cf05b9f9066c43e0
[ "MIT" ]
null
null
null
test/streams.py
gonsp/LotterySampling
92ff14f602c05d747708b522cf05b9f9066c43e0
[ "MIT" ]
null
null
null
import itertools
import math
import numpy as np
from abc import abstractmethod
from io import TextIOWrapper
from sorted_list import SortedList


class Stream():

    def __init__(self, length, save=True):
        self.length = length
        self.N = 0
        self.n = 0
        self.save = save
        self.elements = SortedList()

    def __iter__(self):
        return self

    def __next__(self):
        self.N += 1
        if self.N > self.length:
            raise StopIteration
        element = self.next_element()
        if self.save:  # To speed-up tests in which it is not necessary to check accuracy
            self.elements.process_element(element)
            self.n = self.elements.size()
        return element

    @abstractmethod
    def next_element(self):
        pass

    def top_k_query(self, k):
        return [(str(id), count/self.N) for id, count in itertools.islice(iter(self.elements), k)]

    def frequent_query(self, freq):
        return [(str(id), count/self.N) for id, count in itertools.takewhile(lambda element: element[1] >= math.ceil(freq * self.N), iter(self.elements))]


def chunk_stream(stream, chunk_size):
    it = iter(stream)
    while True:
        chunk = list(itertools.islice(it, chunk_size))
        if len(chunk) > 0:
            yield chunk
        else:
            return None


class MultiZipf(Stream):

    def __init__(self, length, alpha=1.5, segments=2, offset=10000, seed=None, save=True):
        super().__init__(length, save)
        self.alpha = alpha
        self.segments = segments
        self.offset = offset
        np.random.seed(seed)

    def next_element(self):
        element = np.random.zipf(self.alpha)
        element += self.offset * (self.N // (self.length / self.segments))
        return int(element)


class Zipf(MultiZipf):

    def __init__(self, length, alpha=1.5, seed=None, save=True):
        super().__init__(length, alpha=alpha, segments=1, seed=seed, save=save)


class Uniform(Stream):

    def __init__(self, length, n_max, seed=None, save=True):
        super().__init__(length, save)
        self.n_max = n_max
        np.random.seed(seed)

    def next_element(self):
        return np.random.randint(0, self.n_max)


class Unequal(Stream):

    def __init__(self, length, alpha, beta, seed=None, save=True):
        super().__init__(length, save)
        data = np.zeros(length, dtype=int)
        for i in range(alpha):
            for j in range(beta):
                data[i*beta + j] = i
        for i in range(alpha * beta, length):
            data[i] = i - alpha * (beta - 1)
        np.random.seed(seed)
        self.data = iter(np.random.permutation(data))

    def next_element(self):
        return next(self.data)


class File(Stream):

    def __init__(self, file_path, length=math.inf, shuffle=False, repetitions=1, seed=None, save=True):
        if shuffle or repetitions > 1:
            self.data = []
            with open(file_path, 'r') as file:
                for line in file:
                    element = line[:-1]
                    self.data.append(element)
            self.data *= repetitions
            length = min(len(self.data), length)
            if shuffle:
                np.random.seed(seed)
                self.data = np.random.permutation(self.data)
        else:
            with open(file_path, 'r') as file:
                length = min(sum(1 for _ in file), length)
            self.data = open(file_path, 'r')
        super().__init__(length, save)

    def next_element(self):
        if isinstance(self.data, TextIOWrapper):
            element = self.data.readline()[:-1]
            if element == '':
                raise StopIteration
            return element
        else:
            if self.N == len(self.data):
                raise StopIteration
            return self.data[self.N]


class ZipfNoiseZipf(Stream):

    def __init__(self, length, alpha=1.5, noise=0.3, offset=10000, seed=None, save=True):
        super().__init__(length, save)
        self.alpha = alpha
        self.noise = noise
        self.offset = offset
        np.random.seed(seed)

    def next_element(self):
        if self.N < self.length * (1 - self.noise) // 2:
            return int(np.random.zipf(self.alpha))
        elif self.N < self.length - self.length * (1 - self.noise) // 2:
            return self.N
        else:
            return int(np.random.zipf(self.alpha) + self.offset)


class ESA(Stream):

    def __init__(self, length, seed=None, save=True):
        super().__init__(length, save)
        np.random.seed(seed)

    def next_element(self):
        if self.N < self.length // 2:
            return self.N // 2
        else:
            return self.length
27.387283
154
0.580625
import itertools
import math
import numpy as np
from abc import abstractmethod
from io import TextIOWrapper
from sorted_list import SortedList


class Stream():

    def __init__(self, length, save=True):
        self.length = length
        self.N = 0
        self.n = 0
        self.save = save
        self.elements = SortedList()

    def __iter__(self):
        return self

    def __next__(self):
        self.N += 1
        if self.N > self.length:
            raise StopIteration
        element = self.next_element()
        if self.save:
            self.elements.process_element(element)
            self.n = self.elements.size()
        return element

    @abstractmethod
    def next_element(self):
        pass

    def top_k_query(self, k):
        return [(str(id), count/self.N) for id, count in itertools.islice(iter(self.elements), k)]

    def frequent_query(self, freq):
        return [(str(id), count/self.N) for id, count in itertools.takewhile(lambda element: element[1] >= math.ceil(freq * self.N), iter(self.elements))]


def chunk_stream(stream, chunk_size):
    it = iter(stream)
    while True:
        chunk = list(itertools.islice(it, chunk_size))
        if len(chunk) > 0:
            yield chunk
        else:
            return None


class MultiZipf(Stream):

    def __init__(self, length, alpha=1.5, segments=2, offset=10000, seed=None, save=True):
        super().__init__(length, save)
        self.alpha = alpha
        self.segments = segments
        self.offset = offset
        np.random.seed(seed)

    def next_element(self):
        element = np.random.zipf(self.alpha)
        element += self.offset * (self.N // (self.length / self.segments))
        return int(element)


class Zipf(MultiZipf):

    def __init__(self, length, alpha=1.5, seed=None, save=True):
        super().__init__(length, alpha=alpha, segments=1, seed=seed, save=save)


class Uniform(Stream):

    def __init__(self, length, n_max, seed=None, save=True):
        super().__init__(length, save)
        self.n_max = n_max
        np.random.seed(seed)

    def next_element(self):
        return np.random.randint(0, self.n_max)


class Unequal(Stream):

    def __init__(self, length, alpha, beta, seed=None, save=True):
        super().__init__(length, save)
        data = np.zeros(length, dtype=int)
        for i in range(alpha):
            for j in range(beta):
                data[i*beta + j] = i
        for i in range(alpha * beta, length):
            data[i] = i - alpha * (beta - 1)
        np.random.seed(seed)
        self.data = iter(np.random.permutation(data))

    def next_element(self):
        return next(self.data)


class File(Stream):

    def __init__(self, file_path, length=math.inf, shuffle=False, repetitions=1, seed=None, save=True):
        if shuffle or repetitions > 1:
            self.data = []
            with open(file_path, 'r') as file:
                for line in file:
                    element = line[:-1]
                    self.data.append(element)
            self.data *= repetitions
            length = min(len(self.data), length)
            if shuffle:
                np.random.seed(seed)
                self.data = np.random.permutation(self.data)
        else:
            with open(file_path, 'r') as file:
                length = min(sum(1 for _ in file), length)
            self.data = open(file_path, 'r')
        super().__init__(length, save)

    def next_element(self):
        if isinstance(self.data, TextIOWrapper):
            element = self.data.readline()[:-1]
            if element == '':
                raise StopIteration
            return element
        else:
            if self.N == len(self.data):
                raise StopIteration
            return self.data[self.N]


class ZipfNoiseZipf(Stream):

    def __init__(self, length, alpha=1.5, noise=0.3, offset=10000, seed=None, save=True):
        super().__init__(length, save)
        self.alpha = alpha
        self.noise = noise
        self.offset = offset
        np.random.seed(seed)

    def next_element(self):
        if self.N < self.length * (1 - self.noise) // 2:
            return int(np.random.zipf(self.alpha))
        elif self.N < self.length - self.length * (1 - self.noise) // 2:
            return self.N
        else:
            return int(np.random.zipf(self.alpha) + self.offset)


class ESA(Stream):

    def __init__(self, length, seed=None, save=True):
        super().__init__(length, save)
        np.random.seed(seed)

    def next_element(self):
        if self.N < self.length // 2:
            return self.N // 2
        else:
            return self.length
true
true
f7247e8a17813fa402740c3c525d9fef0664b3d9
116
py
Python
web-app/backend/apps/common/routes/__init__.py
titoeb/kfserving
b072a76842b57e904dbdf46a136474a22051500d
[ "Apache-2.0" ]
47
2022-01-02T09:59:15.000Z
2022-01-25T11:11:17.000Z
web-app/backend/apps/common/routes/__init__.py
titoeb/kfserving
b072a76842b57e904dbdf46a136474a22051500d
[ "Apache-2.0" ]
7
2021-08-31T23:55:06.000Z
2022-03-02T11:34:58.000Z
web-app/backend/apps/common/routes/__init__.py
titoeb/kfserving
b072a76842b57e904dbdf46a136474a22051500d
[ "Apache-2.0" ]
4
2022-01-27T08:59:15.000Z
2022-02-27T14:42:19.000Z
from flask import Blueprint

bp = Blueprint("base_routes", __name__)
from . import delete, get # noqa: F401, E402
19.333333
45
0.732759
from flask import Blueprint

bp = Blueprint("base_routes", __name__)
from . import delete, get
true
true
f7247ef6bd54462db7b045b5ea78324bc3752082
256
py
Python
sessioncontroller/settings.py
synteny/AuroraBot
179919e1e6cc5f24d9cf3e9295d0f043174a6169
[ "MIT" ]
2
2015-11-27T11:08:24.000Z
2018-03-21T15:35:08.000Z
sessioncontroller/settings.py
synteny/AuroraBot
179919e1e6cc5f24d9cf3e9295d0f043174a6169
[ "MIT" ]
2
2015-10-27T15:06:09.000Z
2015-10-28T12:53:12.000Z
sessioncontroller/settings.py
synteny/AuroraBot
179919e1e6cc5f24d9cf3e9295d0f043174a6169
[ "MIT" ]
null
null
null
import os

TELEGRAM_TOKEN = os.environ['TELEGRAM_TOKEN']

DATABASE = {
    'HOST': os.getenv('DB_PORT_3306_TCP_ADDR', 'localhost'),
    'USER': os.getenv('DB_MYSQL_USER', 'root'),
    'PASSWORD': os.getenv('DB_MYSQL_PASSWORD', ''),
    'NAME': 'aurora',
}
23.272727
60
0.652344
import os

TELEGRAM_TOKEN = os.environ['TELEGRAM_TOKEN']

DATABASE = {
    'HOST': os.getenv('DB_PORT_3306_TCP_ADDR', 'localhost'),
    'USER': os.getenv('DB_MYSQL_USER', 'root'),
    'PASSWORD': os.getenv('DB_MYSQL_PASSWORD', ''),
    'NAME': 'aurora',
}
true
true
f7247f14b21d4ad2ba934ab6dab587b66188f368
8,305
py
Python
fpn/operator_py/fpn_roi_pooling.py
CosmosHua/Deformable-ConvNets
6aeda878a95bcb55eadffbe125804e730574de8d
[ "MIT" ]
3,976
2017-05-05T13:48:27.000Z
2022-03-30T13:37:48.000Z
fpn/operator_py/fpn_roi_pooling.py
CosmosHua/Deformable-ConvNets
6aeda878a95bcb55eadffbe125804e730574de8d
[ "MIT" ]
259
2017-05-06T13:30:11.000Z
2022-03-16T14:11:16.000Z
fpn/operator_py/fpn_roi_pooling.py
CosmosHua/Deformable-ConvNets
6aeda878a95bcb55eadffbe125804e730574de8d
[ "MIT" ]
1,051
2017-05-05T14:55:57.000Z
2022-03-23T01:02:47.000Z
# -------------------------------------------------------- # Deformable Convolutional Networks # Copyright (c) 2017 Microsoft # Licensed under The MIT License [see LICENSE for details] # Modified by Haozhi Qi, Yuwen Xiong # -------------------------------------------------------- import mxnet as mx import numpy as np from mxnet.contrib import autograd import gc class FPNROIPoolingOperator(mx.operator.CustomOp): def __init__(self, feat_strides, pooled_height, pooled_width, output_dim, with_deformable): self.pooled_height = pooled_height self.pooled_width = pooled_width self.feat_strides = feat_strides self.with_deformable = with_deformable self.output_dim = output_dim self.in_grad_hist_list = [] self.num_strides = len(self.feat_strides) self.roi_pool = [None for _ in range(self.num_strides)] self.feat_idx = [None for _ in range(self.num_strides)] def forward(self, is_train, req, in_data, out_data, aux): rois = in_data[-1].asnumpy() w = rois[:, 3] - rois[:, 1] + 1 h = rois[:, 4] - rois[:, 2] + 1 feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, len(self.feat_strides) - 1) pyramid_idx = [] rois_p = [None for _ in range(self.num_strides)] for i in range(self.num_strides): self.feat_idx[i] = np.where(feat_id == i)[0] if len(self.feat_idx[i]) == 0: # padding dummy roi rois_p[i] = np.zeros((1, 5)) pyramid_idx.append(-1) else: rois_p[i] = rois[self.feat_idx[i]] pyramid_idx.append(self.feat_idx[i]) rois_idx = np.argsort(np.hstack(pyramid_idx))[-rois.shape[0]:] if is_train: for i in range(self.num_strides): self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i])) if self.with_deformable: for i in range(self.num_strides, self.num_strides * 3): self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i])) autograd.mark_variables([in_data[i] for i in range(self.num_strides * 3)], self.in_grad_hist_list) with autograd.train_section(): for i in range(self.num_strides): roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, spatial_scale=1.0 / self.feat_strides[i]) roi_offset = mx.nd.FullyConnected(data=roi_offset_t, num_hidden=7 * 7 * 2, weight=in_data[i * 2 + self.num_strides], bias=in_data[i * 2 + 1 + self.num_strides]) roi_offset_reshape = mx.nd.reshape(data=roi_offset, shape=(-1, 2, 7, 7)) self.roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), trans=roi_offset_reshape, group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7, output_dim=self.output_dim, spatial_scale=1.0 / self.feat_strides[i], trans_std=0.1) else: autograd.mark_variables([in_data[i] for i in range(self.num_strides)], self.in_grad_hist_list) with autograd.train_section(): for i in range(self.num_strides): self.roi_pool[i] = mx.nd.ROIPooling(in_data[i], mx.nd.array(rois_p[i], in_data[i].context), (7, 7), spatial_scale=1.0 / self.feat_strides[i]) roi_pool = mx.nd.concatenate(self.roi_pool, axis=0) else: # during testing, there is no need to record variable, thus saving memory roi_pool = [None for _ in range(self.num_strides)] if self.with_deformable: for i in range(self.num_strides): roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, spatial_scale=1.0 / self.feat_strides[i]) roi_offset = mx.nd.FullyConnected(data=roi_offset_t, 
num_hidden=7 * 7 * 2, weight=in_data[i * 2 + self.num_strides], bias=in_data[i * 2 + 1 + self.num_strides]) roi_offset_reshape = mx.nd.reshape(data=roi_offset, shape=(-1, 2, 7, 7)) roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), trans=roi_offset_reshape, group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7, output_dim=self.output_dim, spatial_scale=1.0 / self.feat_strides[i], trans_std=0.1) else: for i in range(self.num_strides): roi_pool[i] = mx.nd.ROIPooling(in_data[i], mx.nd.array(rois_p[i], in_data[i].context), (7, 7), spatial_scale=1.0 / self.feat_strides[i]) roi_pool = mx.nd.concatenate(roi_pool, axis=0) roi_pool = mx.nd.take(roi_pool, mx.nd.array(rois_idx, roi_pool.context)) self.assign(out_data[0], req[0], roi_pool) def backward(self, req, out_grad, in_data, out_data, in_grad, aux): for i in range(len(in_grad)): self.assign(in_grad[i], req[i], 0) with autograd.train_section(): for i in range(self.num_strides): if len(self.feat_idx[i] > 0): autograd.compute_gradient([mx.nd.take(out_grad[0], mx.nd.array(self.feat_idx[i], out_grad[0].context)) * self.roi_pool[i]]) if self.with_deformable: for i in range(0, self.num_strides * 3): self.assign(in_grad[i], req[i], self.in_grad_hist_list[i]) else: for i in range(0, self.num_strides): self.assign(in_grad[i], req[i], self.in_grad_hist_list[i]) gc.collect() @mx.operator.register('fpn_roi_pooling') class FPNROIPoolingProp(mx.operator.CustomOpProp): def __init__(self, feat_strides='(4,8,16,32)', pooled_height='7', pooled_width='7', with_deformable='False', output_dim='256'): super(FPNROIPoolingProp, self).__init__(need_top_grad=True) self.pooled_height = int(pooled_height) self.pooled_width = int(pooled_width) self.feat_strides = np.fromstring(feat_strides[1:-1], dtype=int, sep=',') self.with_deformable = with_deformable == 'True' self.output_dim = int(output_dim) self.num_strides = len(self.feat_strides) def list_arguments(self): args_list = [] for i in range(self.num_strides): args_list.append('data_p{}'.format(2 + i)) if self.with_deformable: for i in range(self.num_strides): args_list.extend(['offset_weight_p{}'.format(2 + i), 'offset_bias_p{}'.format(2 + i)]) args_list.append('rois') return args_list def list_outputs(self): return ['output'] def infer_shape(self, in_shape): output_feat_shape = [in_shape[-1][0], in_shape[0][1], self.pooled_height, self.pooled_width] if self.with_deformable: offset_dim = self.pooled_height * self.pooled_width * 2 input_dim = self.pooled_height * self.pooled_width * self.output_dim for i in range(self.num_strides): in_shape[i * 2 + self.num_strides], in_shape[i * 2 + 1 + self.num_strides] = [offset_dim, input_dim], [offset_dim, ] return in_shape, [output_feat_shape] def create_operator(self, ctx, shapes, dtypes): return FPNROIPoolingOperator(self.feat_strides, self.pooled_height, self.pooled_width, self.output_dim, self.with_deformable) def declare_backward_dependency(self, out_grad, in_data, out_data): return [out_grad[0]]
56.114865
184
0.593618
import mxnet as mx import numpy as np from mxnet.contrib import autograd import gc class FPNROIPoolingOperator(mx.operator.CustomOp): def __init__(self, feat_strides, pooled_height, pooled_width, output_dim, with_deformable): self.pooled_height = pooled_height self.pooled_width = pooled_width self.feat_strides = feat_strides self.with_deformable = with_deformable self.output_dim = output_dim self.in_grad_hist_list = [] self.num_strides = len(self.feat_strides) self.roi_pool = [None for _ in range(self.num_strides)] self.feat_idx = [None for _ in range(self.num_strides)] def forward(self, is_train, req, in_data, out_data, aux): rois = in_data[-1].asnumpy() w = rois[:, 3] - rois[:, 1] + 1 h = rois[:, 4] - rois[:, 2] + 1 feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, len(self.feat_strides) - 1) pyramid_idx = [] rois_p = [None for _ in range(self.num_strides)] for i in range(self.num_strides): self.feat_idx[i] = np.where(feat_id == i)[0] if len(self.feat_idx[i]) == 0: rois_p[i] = np.zeros((1, 5)) pyramid_idx.append(-1) else: rois_p[i] = rois[self.feat_idx[i]] pyramid_idx.append(self.feat_idx[i]) rois_idx = np.argsort(np.hstack(pyramid_idx))[-rois.shape[0]:] if is_train: for i in range(self.num_strides): self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i])) if self.with_deformable: for i in range(self.num_strides, self.num_strides * 3): self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i])) autograd.mark_variables([in_data[i] for i in range(self.num_strides * 3)], self.in_grad_hist_list) with autograd.train_section(): for i in range(self.num_strides): roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, spatial_scale=1.0 / self.feat_strides[i]) roi_offset = mx.nd.FullyConnected(data=roi_offset_t, num_hidden=7 * 7 * 2, weight=in_data[i * 2 + self.num_strides], bias=in_data[i * 2 + 1 + self.num_strides]) roi_offset_reshape = mx.nd.reshape(data=roi_offset, shape=(-1, 2, 7, 7)) self.roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), trans=roi_offset_reshape, group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7, output_dim=self.output_dim, spatial_scale=1.0 / self.feat_strides[i], trans_std=0.1) else: autograd.mark_variables([in_data[i] for i in range(self.num_strides)], self.in_grad_hist_list) with autograd.train_section(): for i in range(self.num_strides): self.roi_pool[i] = mx.nd.ROIPooling(in_data[i], mx.nd.array(rois_p[i], in_data[i].context), (7, 7), spatial_scale=1.0 / self.feat_strides[i]) roi_pool = mx.nd.concatenate(self.roi_pool, axis=0) else: roi_pool = [None for _ in range(self.num_strides)] if self.with_deformable: for i in range(self.num_strides): roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, spatial_scale=1.0 / self.feat_strides[i]) roi_offset = mx.nd.FullyConnected(data=roi_offset_t, num_hidden=7 * 7 * 2, weight=in_data[i * 2 + self.num_strides], bias=in_data[i * 2 + 1 + self.num_strides]) roi_offset_reshape = mx.nd.reshape(data=roi_offset, shape=(-1, 2, 7, 7)) roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), trans=roi_offset_reshape, group_size=1, pooled_size=7, sample_per_part=4, 
no_trans=False, part_size=7, output_dim=self.output_dim, spatial_scale=1.0 / self.feat_strides[i], trans_std=0.1) else: for i in range(self.num_strides): roi_pool[i] = mx.nd.ROIPooling(in_data[i], mx.nd.array(rois_p[i], in_data[i].context), (7, 7), spatial_scale=1.0 / self.feat_strides[i]) roi_pool = mx.nd.concatenate(roi_pool, axis=0) roi_pool = mx.nd.take(roi_pool, mx.nd.array(rois_idx, roi_pool.context)) self.assign(out_data[0], req[0], roi_pool) def backward(self, req, out_grad, in_data, out_data, in_grad, aux): for i in range(len(in_grad)): self.assign(in_grad[i], req[i], 0) with autograd.train_section(): for i in range(self.num_strides): if len(self.feat_idx[i] > 0): autograd.compute_gradient([mx.nd.take(out_grad[0], mx.nd.array(self.feat_idx[i], out_grad[0].context)) * self.roi_pool[i]]) if self.with_deformable: for i in range(0, self.num_strides * 3): self.assign(in_grad[i], req[i], self.in_grad_hist_list[i]) else: for i in range(0, self.num_strides): self.assign(in_grad[i], req[i], self.in_grad_hist_list[i]) gc.collect() @mx.operator.register('fpn_roi_pooling') class FPNROIPoolingProp(mx.operator.CustomOpProp): def __init__(self, feat_strides='(4,8,16,32)', pooled_height='7', pooled_width='7', with_deformable='False', output_dim='256'): super(FPNROIPoolingProp, self).__init__(need_top_grad=True) self.pooled_height = int(pooled_height) self.pooled_width = int(pooled_width) self.feat_strides = np.fromstring(feat_strides[1:-1], dtype=int, sep=',') self.with_deformable = with_deformable == 'True' self.output_dim = int(output_dim) self.num_strides = len(self.feat_strides) def list_arguments(self): args_list = [] for i in range(self.num_strides): args_list.append('data_p{}'.format(2 + i)) if self.with_deformable: for i in range(self.num_strides): args_list.extend(['offset_weight_p{}'.format(2 + i), 'offset_bias_p{}'.format(2 + i)]) args_list.append('rois') return args_list def list_outputs(self): return ['output'] def infer_shape(self, in_shape): output_feat_shape = [in_shape[-1][0], in_shape[0][1], self.pooled_height, self.pooled_width] if self.with_deformable: offset_dim = self.pooled_height * self.pooled_width * 2 input_dim = self.pooled_height * self.pooled_width * self.output_dim for i in range(self.num_strides): in_shape[i * 2 + self.num_strides], in_shape[i * 2 + 1 + self.num_strides] = [offset_dim, input_dim], [offset_dim, ] return in_shape, [output_feat_shape] def create_operator(self, ctx, shapes, dtypes): return FPNROIPoolingOperator(self.feat_strides, self.pooled_height, self.pooled_width, self.output_dim, self.with_deformable) def declare_backward_dependency(self, out_grad, in_data, out_data): return [out_grad[0]]
true
true
f7247f42ef9c871c8ebd07fc747da69ad689d3a3
405
py
Python
05/iterator_example.py
alissonit/pythontrap
b7780913d49af2142be4a9674ac435e2a67da201
[ "MIT" ]
null
null
null
05/iterator_example.py
alissonit/pythontrap
b7780913d49af2142be4a9674ac435e2a67da201
[ "MIT" ]
null
null
null
05/iterator_example.py
alissonit/pythontrap
b7780913d49af2142be4a9674ac435e2a67da201
[ "MIT" ]
1
2021-03-15T18:26:14.000Z
2021-03-15T18:26:14.000Z
#CONSTRUINDO UMA CLASSE ITERATOR

class GenItem(object):
    def __init__(self, first, last):
        self.first = first
        self.last = last

    def __iter__(self):
        return self

    def __next__(self):
        if self.first > self.last:
            raise StopIteration
        else:
            self.first += 1
            return self.first - 1


n_list = GenItem(1,10)
print(list(n_list))
22.5
36
0.577778
class GenItem(object):
    def __init__(self, first, last):
        self.first = first
        self.last = last

    def __iter__(self):
        return self

    def __next__(self):
        if self.first > self.last:
            raise StopIteration
        else:
            self.first += 1
            return self.first - 1


n_list = GenItem(1,10)
print(list(n_list))
true
true
f72480521f1fad6394a1656241b51fbd1c7d3230
14,939
py
Python
Lib/site-packages/wx-3.0-msw/wx/lib/gridmovers.py
jickieduan/python27
c752b552396bbed68d8555080d475718cea2edd0
[ "bzip2-1.0.6" ]
1
2021-02-13T22:40:50.000Z
2021-02-13T22:40:50.000Z
Lib/site-packages/wx-3.0-msw/wx/lib/gridmovers.py
jickieduan/python27
c752b552396bbed68d8555080d475718cea2edd0
[ "bzip2-1.0.6" ]
1
2018-07-28T20:07:04.000Z
2018-07-30T18:28:34.000Z
Lib/site-packages/wx-3.0-msw/wx/lib/gridmovers.py
jickieduan/python27
c752b552396bbed68d8555080d475718cea2edd0
[ "bzip2-1.0.6" ]
2
2019-12-02T01:39:10.000Z
2021-02-13T22:41:00.000Z
#---------------------------------------------------------------------------- # Name: GridColMover.py # Purpose: Grid Column Mover Extension # # Author: Gerrit van Dyk (email: gerritvd@decillion.net) # # Version 0.1 # Date: Nov 19, 2002 # RCS-ID: $Id$ # Licence: wxWindows license #---------------------------------------------------------------------------- # 12/07/2003 - Jeff Grimmett (grimmtooth@softhome.net) # # o 2.5 Compatability changes # # 12/18/2003 - Jeff Grimmett (grimmtooth@softhome.net) # # o wxGridColMoveEvent -> GridColMoveEvent # o wxGridRowMoveEvent -> GridRowMoveEvent # o wxGridColMover -> GridColMover # o wxGridRowMover -> GridRowMover # import wx import wx.grid #---------------------------------------------------------------------------- # event class and macros # # New style 12/7/03 # wxEVT_COMMAND_GRID_COL_MOVE = wx.NewEventType() wxEVT_COMMAND_GRID_ROW_MOVE = wx.NewEventType() EVT_GRID_COL_MOVE = wx.PyEventBinder(wxEVT_COMMAND_GRID_COL_MOVE, 1) EVT_GRID_ROW_MOVE = wx.PyEventBinder(wxEVT_COMMAND_GRID_ROW_MOVE, 1) #---------------------------------------------------------------------------- class GridColMoveEvent(wx.PyCommandEvent): def __init__(self, id, dCol, bCol): wx.PyCommandEvent.__init__(self, id = id) self.SetEventType(wxEVT_COMMAND_GRID_COL_MOVE) self.moveColumn = dCol self.beforeColumn = bCol def GetMoveColumn(self): return self.moveColumn def GetBeforeColumn(self): return self.beforeColumn class GridRowMoveEvent(wx.PyCommandEvent): def __init__(self, id, dRow, bRow): wx.PyCommandEvent.__init__(self,id = id) self.SetEventType(wxEVT_COMMAND_GRID_ROW_MOVE) self.moveRow = dRow self.beforeRow = bRow def GetMoveRow(self): return self.moveRow def GetBeforeRow(self): return self.beforeRow #---------------------------------------------------------------------------- # graft new methods into the wxGrid class def _ColToRect(self,col): if self.GetNumberRows() > 0: rect = self.CellToRect(0,col) else: rect = wx.Rect() rect.height = self.GetColLabelSize() rect.width = self.GetColSize(col) for cCol in range(0,col): rect.x += self.GetColSize(cCol) rect.y = self.GetGridColLabelWindow().GetPosition()[1] return rect wx.grid.Grid.ColToRect = _ColToRect def _RowToRect(self,row): if self.GetNumberCols() > 0: rect = self.CellToRect(row,0) else: rect = wx.Rect() rect.width = self.GetRowLabelSize() rect.height = self.GetRowSize(row) for cRow in range(0,row): rect.y += self.GetRowSize(cRow) rect.x = self.GetGridRowLabelWindow().GetPosition()[0] return rect wx.grid.Grid.RowToRect = _RowToRect #---------------------------------------------------------------------------- class ColDragWindow(wx.Window): def __init__(self,parent,image,dragCol): wx.Window.__init__(self,parent,-1, style=wx.SIMPLE_BORDER) self.image = image self.SetSize((self.image.GetWidth(),self.image.GetHeight())) self.ux = parent.GetScrollPixelsPerUnit()[0] self.moveColumn = dragCol self.Bind(wx.EVT_PAINT, self.OnPaint) def DisplayAt(self,pos,y): x = self.GetPositionTuple()[0] if x == pos: self.Refresh() # Need to display insertion point else: self.MoveXY(pos,y) def GetMoveColumn(self): return self.moveColumn def _GetInsertionInfo(self): parent = self.GetParent() sx = parent.GetViewStart()[0] * self.ux sx -= parent.GetRowLabelSize() x = self.GetPosition()[0] w = self.GetSize()[0] sCol = parent.XToCol(x + sx) eCol = parent.XToCol(x + w + sx) iPos = xPos = xCol = 99999 centerPos = x + sx + (w / 2) for col in range(sCol,eCol + 1): cx = parent.ColToRect(col)[0] if abs(cx - centerPos) < iPos: iPos = abs(cx - centerPos) xCol = 
col
                xPos = cx
        if xCol < 0 or xCol > parent.GetNumberCols():
            xCol = parent.GetNumberCols()
        return (xPos - sx - x,xCol)

    def GetInsertionColumn(self):
        return self._GetInsertionInfo()[1]

    def GetInsertionPos(self):
        return self._GetInsertionInfo()[0]

    def OnPaint(self,evt):
        dc = wx.PaintDC(self)
        w,h = self.GetSize()
        dc.DrawBitmap(self.image, 0,0)
        dc.SetPen(wx.Pen(wx.BLACK,1,wx.SOLID))
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.DrawRectangle(0,0, w,h)
        iPos = self.GetInsertionPos()
        dc.DrawLine(iPos,h - 10, iPos,h)


class RowDragWindow(wx.Window):
    def __init__(self,parent,image,dragRow):
        wx.Window.__init__(self,parent,-1,
                           style=wx.SIMPLE_BORDER)
        self.image = image
        self.SetSize((self.image.GetWidth(),self.image.GetHeight()))
        self.uy = parent.GetScrollPixelsPerUnit()[1]
        self.moveRow = dragRow

        self.Bind(wx.EVT_PAINT, self.OnPaint)

    def DisplayAt(self,x,pos):
        y = self.GetPosition()[1]
        if y == pos:
            self.Refresh()              # Need to display insertion point
        else:
            self.MoveXY(x,pos)

    def GetMoveRow(self):
        return self.moveRow

    def _GetInsertionInfo(self):
        parent = self.GetParent()
        sy = parent.GetViewStart()[1] * self.uy
        sy -= parent.GetColLabelSize()
        y = self.GetPosition()[1]
        h = self.GetSize()[1]
        sRow = parent.YToRow(y + sy)
        eRow = parent.YToRow(y + h + sy)
        iPos = yPos = yRow = 99999
        centerPos = y + sy + (h / 2)
        for row in range(sRow,eRow + 1):
            cy = parent.RowToRect(row)[1]
            if abs(cy - centerPos) < iPos:
                iPos = abs(cy - centerPos)
                yRow = row
                yPos = cy
        if yRow < 0 or yRow > parent.GetNumberRows():
            yRow = parent.GetNumberRows()
        return (yPos - sy - y,yRow)

    def GetInsertionRow(self):
        return self._GetInsertionInfo()[1]

    def GetInsertionPos(self):
        return self._GetInsertionInfo()[0]

    def OnPaint(self,evt):
        dc = wx.PaintDC(self)
        w,h = self.GetSize()
        dc.DrawBitmap(self.image, 0,0)
        dc.SetPen(wx.Pen(wx.BLACK,1,wx.SOLID))
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.DrawRectangle(0,0, w,h)
        iPos = self.GetInsertionPos()
        dc.DrawLine(w - 10,iPos, w,iPos)

#----------------------------------------------------------------------------

class GridColMover(wx.EvtHandler):
    def __init__(self,grid):
        wx.EvtHandler.__init__(self)

        self.grid = grid
        self.lwin = grid.GetGridColLabelWindow()
        self.lwin.PushEventHandler(self)
        self.colWin = None
        self.ux = self.grid.GetScrollPixelsPerUnit()[0]
        self.startX = -10
        self.cellX = 0
        self.didMove = False
        self.isDragging = False

        self.Bind(wx.EVT_MOTION, self.OnMouseMove)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnPress)
        self.Bind(wx.EVT_LEFT_UP, self.OnRelease)

    def OnMouseMove(self,evt):
        if not self.isDragging:
            evt.Skip()
        else:
            _rlSize = self.grid.GetRowLabelSize()
            if abs(self.startX - evt.X) >= 3 \
                   and abs(evt.X - self.lastX) >= 3:
                self.lastX = evt.X
                self.didMove = True
                sx,y = self.grid.GetViewStart()
                w,h = self.lwin.GetClientSize()
                x = sx * self.ux

                if (evt.X + x) < x:
                    x = evt.X + x
                elif evt.X > w:
                    x += evt.X - w

                if x < 1:
                    x = 0
                else:
                    x /= self.ux

                if x != sx:
                    if wx.Platform == '__WXMSW__':
                        self.colWin.Show(False)

                    self.grid.Scroll(x,y)

                x,y = self.lwin.ClientToScreenXY(evt.X,0)
                x,y = self.grid.ScreenToClientXY(x,y)

                if not self.colWin.IsShown():
                    self.colWin.Show(True)

                px = x - self.cellX

                if px < 0 + _rlSize:
                    px = 0 + _rlSize

                if px > w - self.colWin.GetSize()[0] + _rlSize:
                    px = w - self.colWin.GetSize()[0] + _rlSize

                self.colWin.DisplayAt(px,y)
                return

    def OnPress(self,evt):
        self.startX = self.lastX = evt.X
        _rlSize = self.grid.GetRowLabelSize()
        sx = self.grid.GetViewStart()[0] * self.ux
        sx -= _rlSize
        px,py = self.lwin.ClientToScreenXY(evt.X,evt.Y)
        px,py = self.grid.ScreenToClientXY(px,py)

        if self.grid.XToEdgeOfCol(px + sx) != wx.NOT_FOUND:
            evt.Skip()
            return

        self.isDragging = True
        self.didMove = False
        col = self.grid.XToCol(px + sx)
        rect = self.grid.ColToRect(col)
        self.cellX = px + sx - rect.x
        size = self.lwin.GetSize()
        rect.y = 0
        rect.x -= sx + _rlSize
        rect.height = size[1]
        colImg = self._CaptureImage(rect)
        self.colWin = ColDragWindow(self.grid,colImg,col)
        self.colWin.Show(False)
        self.lwin.CaptureMouse()
        evt.Skip()

    def OnRelease(self,evt):
        if self.isDragging:
            self.lwin.ReleaseMouse()
            self.colWin.Show(False)
            self.isDragging = False

            if not self.didMove:
                px = self.lwin.ClientToScreenXY(self.startX,0)[0]
                px = self.grid.ScreenToClientXY(px,0)[0]
                sx = self.grid.GetViewStart()[0] * self.ux
                sx -= self.grid.GetRowLabelSize()
                col = self.grid.XToCol(px+sx)

                if col != wx.NOT_FOUND:
                    self.grid.SelectCol(col,evt.ControlDown())
                return
            else:
                bCol = self.colWin.GetInsertionColumn()
                dCol = self.colWin.GetMoveColumn()

                wx.PostEvent(self,
                             GridColMoveEvent(self.grid.GetId(), dCol, bCol))

            self.colWin.Destroy()
        evt.Skip()

    def _CaptureImage(self,rect):
        bmp = wx.EmptyBitmap(rect.width,rect.height)
        memdc = wx.MemoryDC()
        memdc.SelectObject(bmp)
        dc = wx.WindowDC(self.lwin)
        memdc.Blit(0,0, rect.width, rect.height, dc, rect.x, rect.y)
        memdc.SelectObject(wx.NullBitmap)
        return bmp


class GridRowMover(wx.EvtHandler):
    def __init__(self,grid):
        wx.EvtHandler.__init__(self)

        self.grid = grid
        self.lwin = grid.GetGridRowLabelWindow()
        self.lwin.PushEventHandler(self)
        self.rowWin = None
        self.uy = self.grid.GetScrollPixelsPerUnit()[1]
        self.startY = -10
        self.cellY = 0
        self.didMove = False
        self.isDragging = False

        self.Bind(wx.EVT_MOTION, self.OnMouseMove)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnPress)
        self.Bind(wx.EVT_LEFT_UP, self.OnRelease)

    def OnMouseMove(self,evt):
        if not self.isDragging:
            evt.Skip()
        else:
            _clSize = self.grid.GetColLabelSize()
            if abs(self.startY - evt.Y) >= 3 \
                   and abs(evt.Y - self.lastY) >= 3:
                self.lastY = evt.Y
                self.didMove = True
                x,sy = self.grid.GetViewStart()
                w,h = self.lwin.GetClientSizeTuple()
                y = sy * self.uy

                if (evt.Y + y) < y:
                    y = evt.Y + y
                elif evt.Y > h:
                    y += evt.Y - h

                if y < 1:
                    y = 0
                else:
                    y /= self.uy

                if y != sy:
                    if wx.Platform == '__WXMSW__':
                        self.rowWin.Show(False)

                    self.grid.Scroll(x,y)

                x,y = self.lwin.ClientToScreenXY(0,evt.Y)
                x,y = self.grid.ScreenToClientXY(x,y)

                if not self.rowWin.IsShown():
                    self.rowWin.Show(True)

                py = y - self.cellY

                if py < 0 + _clSize:
                    py = 0 + _clSize

                if py > h - self.rowWin.GetSize()[1] + _clSize:
                    py = h - self.rowWin.GetSize()[1] + _clSize

                self.rowWin.DisplayAt(x,py)
                return

    def OnPress(self,evt):
        self.startY = self.lastY = evt.Y
        _clSize = self.grid.GetColLabelSize()
        sy = self.grid.GetViewStart()[1] * self.uy
        sy -= _clSize
        px,py = self.lwin.ClientToScreenXY(evt.X,evt.Y)
        px,py = self.grid.ScreenToClientXY(px,py)

        if self.grid.YToEdgeOfRow(py + sy) != wx.NOT_FOUND:
            evt.Skip()
            return

        row = self.grid.YToRow(py + sy)
        if row == wx.NOT_FOUND:
            evt.Skip()
            return

        self.isDragging = True
        self.didMove = False
        rect = self.grid.RowToRect(row)
        self.cellY = py + sy - rect.y
        size = self.lwin.GetSize()
        rect.x = 0
        rect.y -= sy + _clSize
        rect.width = size[0]
        rowImg = self._CaptureImage(rect)
        self.rowWin = RowDragWindow(self.grid,rowImg,row)
        self.rowWin.Show(False)
        self.lwin.CaptureMouse()
        evt.Skip()

    def OnRelease(self,evt):
        if self.isDragging:
            self.lwin.ReleaseMouse()
            self.rowWin.Show(False)
            self.isDragging = False

            if not self.didMove:
                py = self.lwin.ClientToScreenXY(0,self.startY)[1]
                py = self.grid.ScreenToClientXY(0,py)[1]
                sy = self.grid.GetViewStart()[1] * self.uy
                sy -= self.grid.GetColLabelSize()
                row = self.grid.YToRow(py + sy)

                if row != wx.NOT_FOUND:
                    self.grid.SelectRow(row,evt.ControlDown())
                return
            else:
                bRow = self.rowWin.GetInsertionRow()
                dRow = self.rowWin.GetMoveRow()

                wx.PostEvent(self,
                             GridRowMoveEvent(self.grid.GetId(), dRow, bRow))

            self.rowWin.Destroy()
        evt.Skip()

    def _CaptureImage(self,rect):
        bmp = wx.EmptyBitmap(rect.width,rect.height)
        memdc = wx.MemoryDC()
        memdc.SelectObject(bmp)
        dc = wx.WindowDC(self.lwin)
        memdc.Blit(0,0, rect.width, rect.height, dc, rect.x, rect.y)
        memdc.SelectObject(wx.NullBitmap)
        return bmp

#----------------------------------------------------------------------------
30.240891
77
0.522391
import wx import wx.grid wxEVT_COMMAND_GRID_COL_MOVE = wx.NewEventType() wxEVT_COMMAND_GRID_ROW_MOVE = wx.NewEventType() EVT_GRID_COL_MOVE = wx.PyEventBinder(wxEVT_COMMAND_GRID_COL_MOVE, 1) EVT_GRID_ROW_MOVE = wx.PyEventBinder(wxEVT_COMMAND_GRID_ROW_MOVE, 1) class GridColMoveEvent(wx.PyCommandEvent): def __init__(self, id, dCol, bCol): wx.PyCommandEvent.__init__(self, id = id) self.SetEventType(wxEVT_COMMAND_GRID_COL_MOVE) self.moveColumn = dCol self.beforeColumn = bCol def GetMoveColumn(self): return self.moveColumn def GetBeforeColumn(self): return self.beforeColumn class GridRowMoveEvent(wx.PyCommandEvent): def __init__(self, id, dRow, bRow): wx.PyCommandEvent.__init__(self,id = id) self.SetEventType(wxEVT_COMMAND_GRID_ROW_MOVE) self.moveRow = dRow self.beforeRow = bRow def GetMoveRow(self): return self.moveRow def GetBeforeRow(self): return self.beforeRow def _ColToRect(self,col): if self.GetNumberRows() > 0: rect = self.CellToRect(0,col) else: rect = wx.Rect() rect.height = self.GetColLabelSize() rect.width = self.GetColSize(col) for cCol in range(0,col): rect.x += self.GetColSize(cCol) rect.y = self.GetGridColLabelWindow().GetPosition()[1] return rect wx.grid.Grid.ColToRect = _ColToRect def _RowToRect(self,row): if self.GetNumberCols() > 0: rect = self.CellToRect(row,0) else: rect = wx.Rect() rect.width = self.GetRowLabelSize() rect.height = self.GetRowSize(row) for cRow in range(0,row): rect.y += self.GetRowSize(cRow) rect.x = self.GetGridRowLabelWindow().GetPosition()[0] return rect wx.grid.Grid.RowToRect = _RowToRect class ColDragWindow(wx.Window): def __init__(self,parent,image,dragCol): wx.Window.__init__(self,parent,-1, style=wx.SIMPLE_BORDER) self.image = image self.SetSize((self.image.GetWidth(),self.image.GetHeight())) self.ux = parent.GetScrollPixelsPerUnit()[0] self.moveColumn = dragCol self.Bind(wx.EVT_PAINT, self.OnPaint) def DisplayAt(self,pos,y): x = self.GetPositionTuple()[0] if x == pos: self.Refresh() else: self.MoveXY(pos,y) def GetMoveColumn(self): return self.moveColumn def _GetInsertionInfo(self): parent = self.GetParent() sx = parent.GetViewStart()[0] * self.ux sx -= parent.GetRowLabelSize() x = self.GetPosition()[0] w = self.GetSize()[0] sCol = parent.XToCol(x + sx) eCol = parent.XToCol(x + w + sx) iPos = xPos = xCol = 99999 centerPos = x + sx + (w / 2) for col in range(sCol,eCol + 1): cx = parent.ColToRect(col)[0] if abs(cx - centerPos) < iPos: iPos = abs(cx - centerPos) xCol = col xPos = cx if xCol < 0 or xCol > parent.GetNumberCols(): xCol = parent.GetNumberCols() return (xPos - sx - x,xCol) def GetInsertionColumn(self): return self._GetInsertionInfo()[1] def GetInsertionPos(self): return self._GetInsertionInfo()[0] def OnPaint(self,evt): dc = wx.PaintDC(self) w,h = self.GetSize() dc.DrawBitmap(self.image, 0,0) dc.SetPen(wx.Pen(wx.BLACK,1,wx.SOLID)) dc.SetBrush(wx.TRANSPARENT_BRUSH) dc.DrawRectangle(0,0, w,h) iPos = self.GetInsertionPos() dc.DrawLine(iPos,h - 10, iPos,h) class RowDragWindow(wx.Window): def __init__(self,parent,image,dragRow): wx.Window.__init__(self,parent,-1, style=wx.SIMPLE_BORDER) self.image = image self.SetSize((self.image.GetWidth(),self.image.GetHeight())) self.uy = parent.GetScrollPixelsPerUnit()[1] self.moveRow = dragRow self.Bind(wx.EVT_PAINT, self.OnPaint) def DisplayAt(self,x,pos): y = self.GetPosition()[1] if y == pos: self.Refresh() else: self.MoveXY(x,pos) def GetMoveRow(self): return self.moveRow def _GetInsertionInfo(self): parent = self.GetParent() sy = parent.GetViewStart()[1] * self.uy sy -= parent.GetColLabelSize() y = 
self.GetPosition()[1] h = self.GetSize()[1] sRow = parent.YToRow(y + sy) eRow = parent.YToRow(y + h + sy) iPos = yPos = yRow = 99999 centerPos = y + sy + (h / 2) for row in range(sRow,eRow + 1): cy = parent.RowToRect(row)[1] if abs(cy - centerPos) < iPos: iPos = abs(cy - centerPos) yRow = row yPos = cy if yRow < 0 or yRow > parent.GetNumberRows(): yRow = parent.GetNumberRows() return (yPos - sy - y,yRow) def GetInsertionRow(self): return self._GetInsertionInfo()[1] def GetInsertionPos(self): return self._GetInsertionInfo()[0] def OnPaint(self,evt): dc = wx.PaintDC(self) w,h = self.GetSize() dc.DrawBitmap(self.image, 0,0) dc.SetPen(wx.Pen(wx.BLACK,1,wx.SOLID)) dc.SetBrush(wx.TRANSPARENT_BRUSH) dc.DrawRectangle(0,0, w,h) iPos = self.GetInsertionPos() dc.DrawLine(w - 10,iPos, w,iPos) class GridColMover(wx.EvtHandler): def __init__(self,grid): wx.EvtHandler.__init__(self) self.grid = grid self.lwin = grid.GetGridColLabelWindow() self.lwin.PushEventHandler(self) self.colWin = None self.ux = self.grid.GetScrollPixelsPerUnit()[0] self.startX = -10 self.cellX = 0 self.didMove = False self.isDragging = False self.Bind(wx.EVT_MOTION, self.OnMouseMove) self.Bind(wx.EVT_LEFT_DOWN, self.OnPress) self.Bind(wx.EVT_LEFT_UP, self.OnRelease) def OnMouseMove(self,evt): if not self.isDragging: evt.Skip() else: _rlSize = self.grid.GetRowLabelSize() if abs(self.startX - evt.X) >= 3 \ and abs(evt.X - self.lastX) >= 3: self.lastX = evt.X self.didMove = True sx,y = self.grid.GetViewStart() w,h = self.lwin.GetClientSize() x = sx * self.ux if (evt.X + x) < x: x = evt.X + x elif evt.X > w: x += evt.X - w if x < 1: x = 0 else: x /= self.ux if x != sx: if wx.Platform == '__WXMSW__': self.colWin.Show(False) self.grid.Scroll(x,y) x,y = self.lwin.ClientToScreenXY(evt.X,0) x,y = self.grid.ScreenToClientXY(x,y) if not self.colWin.IsShown(): self.colWin.Show(True) px = x - self.cellX if px < 0 + _rlSize: px = 0 + _rlSize if px > w - self.colWin.GetSize()[0] + _rlSize: px = w - self.colWin.GetSize()[0] + _rlSize self.colWin.DisplayAt(px,y) return def OnPress(self,evt): self.startX = self.lastX = evt.X _rlSize = self.grid.GetRowLabelSize() sx = self.grid.GetViewStart()[0] * self.ux sx -= _rlSize px,py = self.lwin.ClientToScreenXY(evt.X,evt.Y) px,py = self.grid.ScreenToClientXY(px,py) if self.grid.XToEdgeOfCol(px + sx) != wx.NOT_FOUND: evt.Skip() return self.isDragging = True self.didMove = False col = self.grid.XToCol(px + sx) rect = self.grid.ColToRect(col) self.cellX = px + sx - rect.x size = self.lwin.GetSize() rect.y = 0 rect.x -= sx + _rlSize rect.height = size[1] colImg = self._CaptureImage(rect) self.colWin = ColDragWindow(self.grid,colImg,col) self.colWin.Show(False) self.lwin.CaptureMouse() evt.Skip() def OnRelease(self,evt): if self.isDragging: self.lwin.ReleaseMouse() self.colWin.Show(False) self.isDragging = False if not self.didMove: px = self.lwin.ClientToScreenXY(self.startX,0)[0] px = self.grid.ScreenToClientXY(px,0)[0] sx = self.grid.GetViewStart()[0] * self.ux sx -= self.grid.GetRowLabelSize() col = self.grid.XToCol(px+sx) if col != wx.NOT_FOUND: self.grid.SelectCol(col,evt.ControlDown()) return else: bCol = self.colWin.GetInsertionColumn() dCol = self.colWin.GetMoveColumn() wx.PostEvent(self, GridColMoveEvent(self.grid.GetId(), dCol, bCol)) self.colWin.Destroy() evt.Skip() def _CaptureImage(self,rect): bmp = wx.EmptyBitmap(rect.width,rect.height) memdc = wx.MemoryDC() memdc.SelectObject(bmp) dc = wx.WindowDC(self.lwin) memdc.Blit(0,0, rect.width, rect.height, dc, rect.x, rect.y) 
memdc.SelectObject(wx.NullBitmap) return bmp class GridRowMover(wx.EvtHandler): def __init__(self,grid): wx.EvtHandler.__init__(self) self.grid = grid self.lwin = grid.GetGridRowLabelWindow() self.lwin.PushEventHandler(self) self.rowWin = None self.uy = self.grid.GetScrollPixelsPerUnit()[1] self.startY = -10 self.cellY = 0 self.didMove = False self.isDragging = False self.Bind(wx.EVT_MOTION, self.OnMouseMove) self.Bind(wx.EVT_LEFT_DOWN, self.OnPress) self.Bind(wx.EVT_LEFT_UP, self.OnRelease) def OnMouseMove(self,evt): if not self.isDragging: evt.Skip() else: _clSize = self.grid.GetColLabelSize() if abs(self.startY - evt.Y) >= 3 \ and abs(evt.Y - self.lastY) >= 3: self.lastY = evt.Y self.didMove = True x,sy = self.grid.GetViewStart() w,h = self.lwin.GetClientSizeTuple() y = sy * self.uy if (evt.Y + y) < y: y = evt.Y + y elif evt.Y > h: y += evt.Y - h if y < 1: y = 0 else: y /= self.uy if y != sy: if wx.Platform == '__WXMSW__': self.rowWin.Show(False) self.grid.Scroll(x,y) x,y = self.lwin.ClientToScreenXY(0,evt.Y) x,y = self.grid.ScreenToClientXY(x,y) if not self.rowWin.IsShown(): self.rowWin.Show(True) py = y - self.cellY if py < 0 + _clSize: py = 0 + _clSize if py > h - self.rowWin.GetSize()[1] + _clSize: py = h - self.rowWin.GetSize()[1] + _clSize self.rowWin.DisplayAt(x,py) return def OnPress(self,evt): self.startY = self.lastY = evt.Y _clSize = self.grid.GetColLabelSize() sy = self.grid.GetViewStart()[1] * self.uy sy -= _clSize px,py = self.lwin.ClientToScreenXY(evt.X,evt.Y) px,py = self.grid.ScreenToClientXY(px,py) if self.grid.YToEdgeOfRow(py + sy) != wx.NOT_FOUND: evt.Skip() return row = self.grid.YToRow(py + sy) if row == wx.NOT_FOUND: evt.Skip() return self.isDragging = True self.didMove = False rect = self.grid.RowToRect(row) self.cellY = py + sy - rect.y size = self.lwin.GetSize() rect.x = 0 rect.y -= sy + _clSize rect.width = size[0] rowImg = self._CaptureImage(rect) self.rowWin = RowDragWindow(self.grid,rowImg,row) self.rowWin.Show(False) self.lwin.CaptureMouse() evt.Skip() def OnRelease(self,evt): if self.isDragging: self.lwin.ReleaseMouse() self.rowWin.Show(False) self.isDragging = False if not self.didMove: py = self.lwin.ClientToScreenXY(0,self.startY)[1] py = self.grid.ScreenToClientXY(0,py)[1] sy = self.grid.GetViewStart()[1] * self.uy sy -= self.grid.GetColLabelSize() row = self.grid.YToRow(py + sy) if row != wx.NOT_FOUND: self.grid.SelectRow(row,evt.ControlDown()) return else: bRow = self.rowWin.GetInsertionRow() dRow = self.rowWin.GetMoveRow() wx.PostEvent(self, GridRowMoveEvent(self.grid.GetId(), dRow, bRow)) self.rowWin.Destroy() evt.Skip() def _CaptureImage(self,rect): bmp = wx.EmptyBitmap(rect.width,rect.height) memdc = wx.MemoryDC() memdc.SelectObject(bmp) dc = wx.WindowDC(self.lwin) memdc.Blit(0,0, rect.width, rect.height, dc, rect.x, rect.y) memdc.SelectObject(wx.NullBitmap) return bmp
true
true
f72480fcb8551a59e4a32a3f79265c45343d673d
26,964
py
Python
fair/forward.py
shaheen19/FAIR
345c23b3d35918729e7aa49ecb39047494c48a6e
[ "Apache-2.0" ]
1
2019-09-15T02:35:47.000Z
2019-09-15T02:35:47.000Z
fair/forward.py
shaheen19/FAIR
345c23b3d35918729e7aa49ecb39047494c48a6e
[ "Apache-2.0" ]
null
null
null
fair/forward.py
shaheen19/FAIR
345c23b3d35918729e7aa49ecb39047494c48a6e
[ "Apache-2.0" ]
null
null
null
from __future__ import division import inspect import numpy as np import warnings from scipy.optimize import root from .ancil import natural, cmip6_volcanic, cmip6_solar, historical_scaling from .constants import molwt, lifetime, radeff from .constants.general import M_ATMOS, ppm_gtc from .defaults import carbon, thermal from .forcing import ozone_tr, ozone_st, h2o_st, contrails, aerosols, bc_snow,\ landuse from .forcing.ghg import co2_log def iirf_interp(alp_b,a,tau,iirf_h,targ_iirf): """Interpolation function for finding alpha, the CO2 decay time constant scaling factor, in iirf_h equation. See Eq. (7) of Millar et al ACP (2017). Inputs: alp_b : Guess for alpha, the scale factor, for tau a : partition fractions for CO2 boxes tau : time constants for CO2 boxes iirf_h : time horizon for time-integrated airborne fraction targ_iirf: iirf_h calculated using simple parameterisation (Eq. (8), Millar et al (2017)). """ iirf_arr = alp_b*(np.sum(a*tau*(1.0 - np.exp(-iirf_h/(tau*alp_b))))) return iirf_arr - targ_iirf def iirf_simple(c_acc, temp, r0, rc, rt, iirf_max): """Simple linear iIRF relationship. Eq. (8) of Millar et al ACP (2017). Inputs: c_acc : cumulative airborne carbon anomaly (GtC) since pre-industrial temp : temperature anomaly since pre-industrial r0 : pre-industrial time-integrated airborne fraction (yr) rc : sensitivity of time-integrated airborne fraction to airborne carbon (yr/GtC) rt : sensitivity of time-integrated airborne fraction to temperature (yr/K) iirf_max : maximum value of time-integrated airborne fraction (yr) Outputs: iirf : time-integrated airborne fraction of carbon (yr) """ return np.min([r0 + rc * c_acc + rt * temp, iirf_max]) def calculate_q(tcrecs, d, f2x, tcr_dbl, nt): """If TCR and ECS are supplied, calculate the q model coefficients. See Eqs. (4) and (5) of Millar et al ACP (2017). Inputs: tcrecs : 2-element array of transient climate response (TCR) and equilibrium climate sensitivity (ECS). d : The slow and fast thermal response time constants f2x : Effective radiative forcing from a doubling of CO2 tcr_dbl : time to a doubling of CO2 under 1% per year CO2 increase, yr nt : number of timesteps Outputs: q : coefficients of slow and fast temperature change in each timestep ((nt, 2) array). """ # TODO: # error checking before call # benchmark one call per timestep and if not slower do not convert to 2D # - will make code cleaner k = 1.0 - (d/tcr_dbl)*(1.0 - np.exp(-tcr_dbl/d)) # if ECS and TCR are not time-varying, expand them to 2D array anyway if tcrecs.ndim==1: if len(tcrecs)!=2: raise ValueError( "Constant TCR and ECS should be a 2-element array") tcrecs = np.ones((nt, 2)) * tcrecs elif tcrecs.ndim==2: if tcrecs.shape!=(nt, 2): raise ValueError( "Transient TCR and ECS should be a nt x 2 array") q = (1.0 / f2x) * (1.0/(k[0]-k[1])) * np.array([ tcrecs[:,0]-tcrecs[:,1]*k[1],tcrecs[:,1]*k[0]-tcrecs[:,0]]).T return q def carbon_cycle(e0, c_acc0, temp, r0, rc, rt, iirf_max, time_scale_sf0, a, tau, iirf_h, carbon_boxes0, c_pi, c0, e1): """Calculates CO2 concentrations from emissions. 
Inputs: e0 : emissions of CO2 (GtC) in timestep t-1 c_acc0 : cumulative airborne carbon anomaly (GtC) since pre-industrial, timestep t-1 temp : temperature anomaly above pre-industrial (K) r0 : pre-industrial time-integrated airborne fraction (yr) rc : sensitivity of time-integrated airborne fraction to airborne carbon (yr/GtC) rt : sensitivity of time-integrated airborne fraction to temperature (yr/K) iirf_max : maximum value of time-integrated airborne fraction (yr) time_scale_sf0: initial guess of alpha scaling factor a : partition coefficient of carbon boxes tau : present-day decay time constants of CO2 (yr) iirf_h : time horizon for time-integrated airborne fraction (yr) carbon_boxes0 : carbon stored in each atmospheric reservoir at timestep t-1 (GtC) c_pi : pre-industrial concentration of CO2, ppmv c0 : concentration of CO2 in timestep t-1, ppmv e1 : emissions of CO2 in timestep t, GtC Outputs: c1 : concentrations of CO2 in timestep t, ppmv c_acc1 : cumulative airborne carbon anomaly (GtC) since pre-industrial, timestep t carbon_boxes1 : carbon stored in each atmospheric reservoir at timestep t (GtC) time_scale_sf : scale factor for CO2 decay constants """ iirf = iirf_simple(c_acc0, temp, r0, rc, rt, iirf_max) time_scale_sf = root(iirf_interp, time_scale_sf0, args=(a, tau, iirf_h, iirf))['x'] tau_new = tau * time_scale_sf carbon_boxes1 = carbon_boxes0*np.exp(-1.0/tau_new) + a*e1 / ppm_gtc c1 = np.sum(carbon_boxes1) + c_pi c_acc1 = c_acc0 + 0.5*(e1 + e0) - (c1 - c0)*ppm_gtc return c1, c_acc1, carbon_boxes1, time_scale_sf def emis_to_conc(c0, e0, e1, ts, lt, vm): """Calculate concentrations of well mixed GHGs from emissions for simple one-box model. Inputs (all can be scalar or 1D arrays for multiple species): c0: concentrations in timestep t-1 e0: emissions in timestep t-1 e1: emissions in timestep t ts: length of timestep. Use 1 for sensible results in FaIR 1.3. lt: atmospheric (e-folding) lifetime of GHG vm: conversion from emissions units (e.g. Mt) to concentrations units (e.g. ppb) Outputs: c1: concentrations in timestep t """ c1 = c0 - c0 * (1.0 - np.exp(-ts/lt)) + 0.5 * ts * (e1 + e0) * vm return c1 def forc_to_temp(t0, q, d, f, e=1.0): """Calculate temperature from a given radiative forcing. Inputs: t0: Temperature in timestep t-1 q: The matrix contributions to slow and fast temperature change calculated from ECS and TCR (2 element array) d: The slow and fast thermal response time constants (2 element array) f: radiative forcing (can be scalar or 1D array representing multiple species) Keywords: e: efficacy factor (default 1); if f is an array, e should be an array of the same length. Outputs: t1: slow and fast contributions to total temperature (2 element array) in timestep t """ t1 = t0*np.exp(-1.0/d) + q*(1.0-np.exp((-1.0)/d))*np.sum(f*e) return t1 def fair_scm( emissions=False, emissions_driven=True, C=None, other_rf=0.0, q = thermal.q, tcrecs = thermal.tcrecs, d = thermal.d, F2x = thermal.f2x, tcr_dbl = thermal.tcr_dbl, a = carbon.a, tau = carbon.tau, r0 = carbon.r0, rc = carbon.rc, rt = carbon.rt, iirf_max = carbon.iirf_max, iirf_h = carbon.iirf_h, C_pi=np.array([278., 722., 273., 34.497] + [0.]*25 + [13.0975, 547.996]), restart_in=False, restart_out=False, F_tropO3 = 0., F_aerosol = 0., F_volcanic=cmip6_volcanic.Forcing.volcanic, F_solar=cmip6_solar.Forcing.solar, F_contrails=0., F_bcsnow=0., F_landuse=0., aviNOx_frac=0., fossilCH4_frac=0., natural=natural.Emissions.emissions, efficacy=np.array([1.]*9 + [3.] 
+ [1.]*3), scale=None, oxCH4_frac=0.61, ghg_forcing="Etminan", stwv_from_ch4=None, b_aero = np.array([-6.2227e-3, 0.0, -3.8392e-4, -1.16551e-3, 1.601537e-2, -1.45339e-3, -1.55605e-3]), b_tro3 = np.array([2.8249e-4, 1.0695e-4, -9.3604e-4, 99.7831e-4]), ghan_params = np.array([-1.95011431, 0.01107147, 0.01387492]), stevens_params = np.array([0.001875, 0.634, 60.]), useMultigas=True, useStevenson=True, lifetimes=False, aerosol_forcing="aerocom+ghan", scaleAerosolAR5=True, fixPre1850RCP=True, useTropO3TFeedback=True, scaleHistoricalAR5=False, contrail_forcing='NOx', kerosene_supply=0., landuse_forcing='co2', ): # is iirf_h < iirf_max? Don't stop the code, but warn user if iirf_h < iirf_max: warnings.warn('iirf_h=%f, which is less than iirf_max (%f)' % (iirf_h, iirf_max), RuntimeWarning) # Conversion between ppb/ppt concentrations and Mt/kt emissions # in the RCP databases ppb = Mt and ppt = kt so factor always 1e18 emis2conc = M_ATMOS/1e18*np.asarray(molwt.aslist)/molwt.AIR # Funny units for nitrogen emissions - N2O is expressed in N2 equivalent n2o_sf = molwt.N2O/molwt.N2 emis2conc[2] = emis2conc[2] / n2o_sf # Convert any list to a numpy array for (a) speed and (b) consistency. # Goes through all variables in scope and converts them. frame = inspect.currentframe() args, _, _, values = inspect.getargvalues(frame) for arg_to_check in args: if type(values[arg_to_check]) is list: exec(arg_to_check + '= np.array(' + arg_to_check + ')') # Set up the output timeseries variables depending on options and perform # basic sense checks if useMultigas: ngas = 31 nF = 13 if emissions_driven: if type(emissions) is not np.ndarray or emissions.shape[1] != 40: raise ValueError( "emissions timeseries should be a nt x 40 numpy array") carbon_boxes_shape = (emissions.shape[0], a.shape[0]) thermal_boxes_shape = (emissions.shape[0], d.shape[0]) nt = emissions.shape[0] else: if type(C) is not np.ndarray or C.shape[1] != ngas: raise ValueError( "C timeseries should be a nt x %d numpy array" % ngas) thermal_boxes_shape = (C.shape[0], d.shape[0]) nt = C.shape[0] if np.isscalar(fossilCH4_frac): fossilCH4_frac = np.ones(nt) * fossilCH4_frac # If custom gas lifetimes are supplied, use them, else import defaults if type(lifetimes) is np.ndarray: if len(lifetimes)!=ngas: raise ValueError( "custom GHG lifetime array must have " + str(ngas) + " elements") else: lifetimes = lifetime.aslist # Select the desired GHG forcing relationship and populate # stratospheric water vapour from methane scale factor if not specified # by user if ghg_forcing.lower()=="etminan": from .forcing.ghg import etminan as ghg if stwv_from_ch4==None: stwv_from_ch4=0.12 elif ghg_forcing.lower()=="myhre": from .forcing.ghg import myhre as ghg if stwv_from_ch4==None: stwv_from_ch4=0.15 else: raise ValueError( "ghg_forcing should be 'etminan' (default) or 'myhre'") # Check natural emissions and convert to 2D array if necessary if type(natural) in [float,int]: natural = natural * np.ones((nt,2)) elif type(natural) is np.ndarray: if natural.ndim==1: if natural.shape[0]!=2: raise ValueError( "natural emissions should be a 2-element or nt x 2 " + "array") natural = np.tile(natural, nt).reshape((nt,2)) elif natural.ndim==2: if natural.shape[1]!=2 or natural.shape[0]!=nt: raise ValueError( "natural emissions should be a 2-element or nt x 2 " + "array") else: raise ValueError( "natural emissions should be a scalar, 2-element, or nt x 2 " + "array") # check scale factor is correct shape. 
If 1D inflate to 2D if scale is None: scale = np.ones((nt,nF)) elif scale.shape[-1]==nF: if scale.ndim==2 and scale.shape[0]==nt: pass elif scale.ndim==1: scale = np.tile(scale, nt).reshape((nt,nF)) else: raise ValueError("in multi-gas mode, scale should be None, or a "+ "(13,) or (nt, 13) array") # if scaling the historical time series to match AR5, apply these # factors to whatever the user specifies if scaleHistoricalAR5: scale=scale*historical_scaling.all[:nt,:] else: ngas = 1 nF = 1 if emissions_driven: if type(emissions) is np.ndarray: if emissions.ndim != 1: raise ValueError( "In CO2-only mode, emissions should be a 1D array") nt = emissions.shape[0] carbon_boxes_shape = (nt, a.shape[0]) thermal_boxes_shape = (nt, d.shape[0]) elif type(other_rf) is np.ndarray: if other_rf.ndim != 1: raise ValueError( "In CO2-only mode, other_rf should be a 1D array") nt = other_rf.shape[0] carbon_boxes_shape = (nt, a.shape[0]) thermal_boxes_shape = (nt, d.shape[0]) emissions = np.zeros(nt) else: raise ValueError( "Neither emissions or other_rf is defined as a timeseries") else: if type(C) is not np.ndarray or C.ndim != 1: raise ValueError( "In CO2-only mode, concentrations should be a 1D array") nt = C.shape[0] thermal_boxes_shape = (nt, d.shape[0]) # expand C to 2D array for consistency with other calcs C = C.reshape((nt, 1)) # check scale factor is correct shape - either scalar or 1D # needs try/except really if scale is None: scale = np.ones(nt) elif np.isscalar(scale): scale = np.ones(nt) * scale elif scale.ndim==1 and scale.shape[0]==nt: pass else: raise ValueError("in CO2-only mode, scale should be None, a "+ "scalar or a (nt,) array") # if scaling the historical time series to match AR5, apply these # factors to whatever the user specifies if scaleHistoricalAR5: scale=scale*historical_scaling.co2[:nt] # If TCR and ECS are supplied, calculate q coefficients if type(tcrecs) is np.ndarray: q = calculate_q(tcrecs, d, F2x, tcr_dbl, nt) # Check a and tau are same size if a.ndim != 1: raise ValueError("a should be a 1D array") if tau.ndim != 1: raise ValueError("tau should be a 1D array") if len(a) != len(tau): raise ValueError("a and tau should be the same size") if not np.isclose(np.sum(a), 1.0): raise ValueError("a should sum to one") # Allocate intermediate and output arrays F = np.zeros((nt, nF)) C_acc = np.zeros(nt) T_j = np.zeros(thermal_boxes_shape) T = np.zeros(nt) C_0 = np.copy(C_pi) if emissions_driven: C = np.zeros((nt, ngas)) R_i = np.zeros(carbon_boxes_shape) if restart_in: R_minus1 = restart_in[0] T_j_minus1 = restart_in[1] C_acc_minus1 = restart_in[2] E_minus1 = restart_in[3] C_minus1 = np.sum(R_minus1,axis=-1) + C_0[0] C[0,0], C_acc[0], R_i[0,:], time_scale_sf = carbon_cycle( E_minus1, C_acc_minus1, np.sum(T_j_minus1), r0, rc, rt, iirf_max, 0.16, a, tau, iirf_h, R_minus1, C_pi[0], C_minus1, emissions[0] ) if np.isscalar(other_rf): F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf else: F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf[0] F[0,0] = F[0,0] * scale[0] T_j[0,:] = forc_to_temp(T_j_minus1, q[0,:], d, F[0,:]) T[0]=np.sum(T_j[0,:],axis=-1) else: # Initialise the carbon pools to be correct for first timestep in # numerical method if emissions_driven: if useMultigas: R_i[0,:] = a * (np.sum(emissions[0,1:3])) / ppm_gtc C[0,1:] = C_0[1:] else: R_i[0,:] = a * emissions[0,np.newaxis] / ppm_gtc C[0,0] = np.sum(R_i[0,:],axis=-1) + C_0[0] if useMultigas: # CO2, CH4 and N2O are co-dependent F[0,0:3] = ghg(C[0,0:3], C_pi[0:3], F2x=F2x) # Minor (F- and H-gases) are linear in 
concentration # the factor of 0.001 here is because radiative efficiencies are given # in W/m2/ppb and concentrations of minor gases are in ppt. F[0,3] = np.sum((C[0,3:] - C_pi[3:]) * radeff.aslist[3:] * 0.001) # Tropospheric ozone: if emissions_driven: if useStevenson: F[0,4] = ozone_tr.stevenson(emissions[0,:], C[0,1], T=np.sum(T_j[0,:]), feedback=useTropO3TFeedback, fix_pre1850_RCP=fixPre1850RCP) else: F[0,4] = ozone_tr.regress(emissions[0,:], beta=b_tro3) else: F[:,4] = F_tropO3 # Stratospheric ozone depends on concentrations of ODSs (index 15-30) F[0,5] = ozone_st.magicc(C[0,15:], C_pi[15:]) # Stratospheric water vapour is a function of the methane ERF F[0,6] = h2o_st.linear(F[0,1], ratio=stwv_from_ch4) # Forcing from contrails. No climate feedback so can live outside # of forward model in this version if emissions_driven: if contrail_forcing.lower()[0]=='n': # from NOx emissions F[:,7] = contrails.from_aviNOx(emissions, aviNOx_frac) elif contrail_forcing.lower()[0]=='f': # from kerosene production F[:,7] = contrails.from_fuel(kerosene_supply) elif contrail_forcing.lower()[0]=='e': # external forcing timeseries F[:,7] = F_contrails else: raise ValueError("contrails must be one of 'NOx' (estimated "+ "from NOx emissions), 'fuel' (estimated from annual jet fuel "+ "supplied) or 'external' (an external forcing time series).") else: F[:,7] = F_contrails # Forcing from aerosols - again no feedback dependence if emissions_driven: if aerosol_forcing.lower()=='stevens': F[:,8] = aerosols.Stevens(emissions, stevens_params=stevens_params) elif 'aerocom' in aerosol_forcing.lower(): F[:,8] = aerosols.aerocom_direct(emissions, beta=b_aero) if 'ghan' in aerosol_forcing.lower(): F[:,8] = F[:,8] + aerosols.ghan_indirect(emissions, scale_AR5=scaleAerosolAR5, fix_pre1850_RCP=fixPre1850RCP, ghan_params=ghan_params) elif aerosol_forcing.lower()[0] == 'e': F[:,8] = F_aerosol else: raise ValueError("aerosol_forcing should be one of 'stevens', " + "aerocom, aerocom+ghan or external") else: F[:,8] = F_aerosol # Black carbon on snow - no feedback dependence if emissions_driven: F[:,9] = bc_snow.linear(emissions) else: F[:,9] = F_bcsnow # Land use change - either use a scaling with cumulative CO2 emissions # or an external time series if emissions_driven: if landuse_forcing.lower()[0]=='c': F[:,10] = landuse.cumulative(emissions) elif landuse_forcing.lower()[0]=='e': F[:,10] = F_landuse else: raise ValueError( "landuse_forcing should be one of 'co2' or 'external'") else: F[:,10] = F_landuse # Volcanic and solar copied straight to the output arrays F[:,11] = F_volcanic F[:,12] = F_solar # multiply by scale factors F[0,:] = F[0,:] * scale[0,:] else: if np.isscalar(other_rf): F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf else: F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf[0] F[0,0] = F[0,0] * scale[0] if restart_in == False: # Update the thermal response boxes T_j[0,:] = (q[0,:]/d)*(np.sum(F[0,:])) # Sum the thermal response boxes to get the total temperature anomaly T[0]=np.sum(T_j[0,:],axis=-1) for t in range(1,nt): if emissions_driven: if useMultigas: if t == 1: time_scale_sf = 0.16 # Calculate concentrations # a. 
CARBON DIOXIDE # Firstly add any oxidised methane from last year to the CO2 # pool oxidised_CH4 = ((C[t-1,1]-C_pi[1]) * (1.0 - np.exp(-1.0/lifetimes[1])) * (molwt.C/molwt.CH4 * 0.001 * oxCH4_frac * fossilCH4_frac[t])) oxidised_CH4 = np.max((oxidised_CH4, 0)) C[t,0], C_acc[t], R_i[t,:], time_scale_sf = carbon_cycle( np.sum(emissions[t-1,1:3]), C_acc[t-1], T[t-1], r0, rc, rt, iirf_max, time_scale_sf, a, tau, iirf_h, R_i[t-1,:] + oxidised_CH4, C_pi[0], C[t-1,0], np.sum(emissions[t,1:3]) ) # b. METHANE C[t,1] = emis_to_conc( C[t-1,1], emissions[t-1,3]+natural[t,0], emissions[t,3]+natural[t,0], 1.0, lifetimes[1], 1.0/emis2conc[1] ) # c. NITROUS OXIDE C[t,2] = emis_to_conc( C[t-1,2], emissions[t-1,4]+natural[t,1], emissions[t,4]+natural[t,1], 1.0, lifetimes[2], 1.0/emis2conc[2] ) # d. OTHER WMGHGs C[t,3:] = emis_to_conc( C[t-1,3:], emissions[t-1,12:], emissions[t,12:], 1.0, np.array(lifetimes[3:]), 1.0/emis2conc[3:] ) # 2. Radiative forcing F[t,0:3] = ghg(C[t,0:3], C_pi[0:3], F2x=F2x) F[t,3] = np.sum((C[t,3:] - C_pi[3:]) * radeff.aslist[3:] * 0.001) if useStevenson: F[t,4] = ozone_tr.stevenson(emissions[t,:], C[t,1], T=T[t-1], feedback=useTropO3TFeedback, fix_pre1850_RCP=fixPre1850RCP) else: F[t,4] = ozone_tr.regress(emissions[t,:], beta=b_tro3) F[t,5] = ozone_st.magicc(C[t,15:], C_pi[15:]) F[t,6] = h2o_st.linear(F[t,1], ratio=stwv_from_ch4) # multiply by scale factors F[t,:] = F[t,:] * scale[t,:] # 3. Temperature # Update the thermal response boxes T_j[t,:] = forc_to_temp( T_j[t-1,:], q[t,:], d, F[t,:], e=efficacy) # Sum the thermal response boxes to get the total temperature T[t]=np.sum(T_j[t,:],axis=-1) else: if t == 1: time_scale_sf = 0.16 C[t,0], C_acc[t], R_i[t,:], time_scale_sf = carbon_cycle( emissions[t-1], C_acc[t-1], T[t-1], r0, rc, rt, iirf_max, time_scale_sf, a, tau, iirf_h, R_i[t-1,:], C_pi[0], C[t-1,0], emissions[t] ) if np.isscalar(other_rf): F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf else: F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf[t] F[t,0] = F[t,0] * scale[t] T_j[t,:] = forc_to_temp(T_j[t-1,:], q[t,:], d, F[t,:]) T[t]=np.sum(T_j[t,:],axis=-1) else: if useMultigas: F[t,0:3] = ghg(C[t,0:3], C_pi[0:3], F2x=F2x) F[t,3] = np.sum((C[t,3:] - C_pi[3:]) * radeff.aslist[3:] * 0.001) F[t,5] = ozone_st.magicc(C[t,15:], C_pi[15:]) F[t,6] = h2o_st.linear(F[t,1], ratio=stwv_from_ch4) # multiply by scale factors F[t,:] = F[t,:] * scale[t,:] # 3. Temperature # Update the thermal response boxes T_j[t,:] = T_j[t,:] = forc_to_temp( T_j[t-1,:], q[t,:], d, F[t,:], e=efficacy) # Sum the thermal response boxes to get the total temperature T[t]=np.sum(T_j[t,:],axis=-1) else: if np.isscalar(other_rf): F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf else: F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf[t] F[t,0] = F[t,0] * scale[t] T_j[t,:] = forc_to_temp(T_j[t-1,:], q[t,:], d, F[t,:]) T[t]=np.sum(T_j[t,:],axis=-1) if not useMultigas: C = np.squeeze(C) F = np.squeeze(F) if restart_out: if useMultigas: E_minus1 = np.sum(emissions[-1,1:3]) else: E_minus1 = emissions[-1] restart_out_val=(R_i[-1],T_j[-1],C_acc[-1],E_minus1) return C, F, T, restart_out_val else: return C, F, T
37.870787
83
0.528371
from __future__ import division import inspect import numpy as np import warnings from scipy.optimize import root from .ancil import natural, cmip6_volcanic, cmip6_solar, historical_scaling from .constants import molwt, lifetime, radeff from .constants.general import M_ATMOS, ppm_gtc from .defaults import carbon, thermal from .forcing import ozone_tr, ozone_st, h2o_st, contrails, aerosols, bc_snow,\ landuse from .forcing.ghg import co2_log def iirf_interp(alp_b,a,tau,iirf_h,targ_iirf): iirf_arr = alp_b*(np.sum(a*tau*(1.0 - np.exp(-iirf_h/(tau*alp_b))))) return iirf_arr - targ_iirf def iirf_simple(c_acc, temp, r0, rc, rt, iirf_max): return np.min([r0 + rc * c_acc + rt * temp, iirf_max]) def calculate_q(tcrecs, d, f2x, tcr_dbl, nt): k = 1.0 - (d/tcr_dbl)*(1.0 - np.exp(-tcr_dbl/d)) if tcrecs.ndim==1: if len(tcrecs)!=2: raise ValueError( "Constant TCR and ECS should be a 2-element array") tcrecs = np.ones((nt, 2)) * tcrecs elif tcrecs.ndim==2: if tcrecs.shape!=(nt, 2): raise ValueError( "Transient TCR and ECS should be a nt x 2 array") q = (1.0 / f2x) * (1.0/(k[0]-k[1])) * np.array([ tcrecs[:,0]-tcrecs[:,1]*k[1],tcrecs[:,1]*k[0]-tcrecs[:,0]]).T return q def carbon_cycle(e0, c_acc0, temp, r0, rc, rt, iirf_max, time_scale_sf0, a, tau, iirf_h, carbon_boxes0, c_pi, c0, e1): iirf = iirf_simple(c_acc0, temp, r0, rc, rt, iirf_max) time_scale_sf = root(iirf_interp, time_scale_sf0, args=(a, tau, iirf_h, iirf))['x'] tau_new = tau * time_scale_sf carbon_boxes1 = carbon_boxes0*np.exp(-1.0/tau_new) + a*e1 / ppm_gtc c1 = np.sum(carbon_boxes1) + c_pi c_acc1 = c_acc0 + 0.5*(e1 + e0) - (c1 - c0)*ppm_gtc return c1, c_acc1, carbon_boxes1, time_scale_sf def emis_to_conc(c0, e0, e1, ts, lt, vm): c1 = c0 - c0 * (1.0 - np.exp(-ts/lt)) + 0.5 * ts * (e1 + e0) * vm return c1 def forc_to_temp(t0, q, d, f, e=1.0): t1 = t0*np.exp(-1.0/d) + q*(1.0-np.exp((-1.0)/d))*np.sum(f*e) return t1 def fair_scm( emissions=False, emissions_driven=True, C=None, other_rf=0.0, q = thermal.q, tcrecs = thermal.tcrecs, d = thermal.d, F2x = thermal.f2x, tcr_dbl = thermal.tcr_dbl, a = carbon.a, tau = carbon.tau, r0 = carbon.r0, rc = carbon.rc, rt = carbon.rt, iirf_max = carbon.iirf_max, iirf_h = carbon.iirf_h, C_pi=np.array([278., 722., 273., 34.497] + [0.]*25 + [13.0975, 547.996]), restart_in=False, restart_out=False, F_tropO3 = 0., F_aerosol = 0., F_volcanic=cmip6_volcanic.Forcing.volcanic, F_solar=cmip6_solar.Forcing.solar, F_contrails=0., F_bcsnow=0., F_landuse=0., aviNOx_frac=0., fossilCH4_frac=0., natural=natural.Emissions.emissions, efficacy=np.array([1.]*9 + [3.] 
+ [1.]*3), scale=None, oxCH4_frac=0.61, ghg_forcing="Etminan", stwv_from_ch4=None, b_aero = np.array([-6.2227e-3, 0.0, -3.8392e-4, -1.16551e-3, 1.601537e-2, -1.45339e-3, -1.55605e-3]), b_tro3 = np.array([2.8249e-4, 1.0695e-4, -9.3604e-4, 99.7831e-4]), ghan_params = np.array([-1.95011431, 0.01107147, 0.01387492]), stevens_params = np.array([0.001875, 0.634, 60.]), useMultigas=True, useStevenson=True, lifetimes=False, aerosol_forcing="aerocom+ghan", scaleAerosolAR5=True, fixPre1850RCP=True, useTropO3TFeedback=True, scaleHistoricalAR5=False, contrail_forcing='NOx', kerosene_supply=0., landuse_forcing='co2', ): if iirf_h < iirf_max: warnings.warn('iirf_h=%f, which is less than iirf_max (%f)' % (iirf_h, iirf_max), RuntimeWarning) # Conversion between ppb/ppt concentrations and Mt/kt emissions # in the RCP databases ppb = Mt and ppt = kt so factor always 1e18 emis2conc = M_ATMOS/1e18*np.asarray(molwt.aslist)/molwt.AIR # Funny units for nitrogen emissions - N2O is expressed in N2 equivalent n2o_sf = molwt.N2O/molwt.N2 emis2conc[2] = emis2conc[2] / n2o_sf # Convert any list to a numpy array for (a) speed and (b) consistency. # Goes through all variables in scope and converts them. frame = inspect.currentframe() args, _, _, values = inspect.getargvalues(frame) for arg_to_check in args: if type(values[arg_to_check]) is list: exec(arg_to_check + '= np.array(' + arg_to_check + ')') # Set up the output timeseries variables depending on options and perform # basic sense checks if useMultigas: ngas = 31 nF = 13 if emissions_driven: if type(emissions) is not np.ndarray or emissions.shape[1] != 40: raise ValueError( "emissions timeseries should be a nt x 40 numpy array") carbon_boxes_shape = (emissions.shape[0], a.shape[0]) thermal_boxes_shape = (emissions.shape[0], d.shape[0]) nt = emissions.shape[0] else: if type(C) is not np.ndarray or C.shape[1] != ngas: raise ValueError( "C timeseries should be a nt x %d numpy array" % ngas) thermal_boxes_shape = (C.shape[0], d.shape[0]) nt = C.shape[0] if np.isscalar(fossilCH4_frac): fossilCH4_frac = np.ones(nt) * fossilCH4_frac # If custom gas lifetimes are supplied, use them, else import defaults if type(lifetimes) is np.ndarray: if len(lifetimes)!=ngas: raise ValueError( "custom GHG lifetime array must have " + str(ngas) + " elements") else: lifetimes = lifetime.aslist # Select the desired GHG forcing relationship and populate # stratospheric water vapour from methane scale factor if not specified # by user if ghg_forcing.lower()=="etminan": from .forcing.ghg import etminan as ghg if stwv_from_ch4==None: stwv_from_ch4=0.12 elif ghg_forcing.lower()=="myhre": from .forcing.ghg import myhre as ghg if stwv_from_ch4==None: stwv_from_ch4=0.15 else: raise ValueError( "ghg_forcing should be 'etminan' (default) or 'myhre'") # Check natural emissions and convert to 2D array if necessary if type(natural) in [float,int]: natural = natural * np.ones((nt,2)) elif type(natural) is np.ndarray: if natural.ndim==1: if natural.shape[0]!=2: raise ValueError( "natural emissions should be a 2-element or nt x 2 " + "array") natural = np.tile(natural, nt).reshape((nt,2)) elif natural.ndim==2: if natural.shape[1]!=2 or natural.shape[0]!=nt: raise ValueError( "natural emissions should be a 2-element or nt x 2 " + "array") else: raise ValueError( "natural emissions should be a scalar, 2-element, or nt x 2 " + "array") # check scale factor is correct shape. 
If 1D inflate to 2D if scale is None: scale = np.ones((nt,nF)) elif scale.shape[-1]==nF: if scale.ndim==2 and scale.shape[0]==nt: pass elif scale.ndim==1: scale = np.tile(scale, nt).reshape((nt,nF)) else: raise ValueError("in multi-gas mode, scale should be None, or a "+ "(13,) or (nt, 13) array") # if scaling the historical time series to match AR5, apply these # factors to whatever the user specifies if scaleHistoricalAR5: scale=scale*historical_scaling.all[:nt,:] else: ngas = 1 nF = 1 if emissions_driven: if type(emissions) is np.ndarray: if emissions.ndim != 1: raise ValueError( "In CO2-only mode, emissions should be a 1D array") nt = emissions.shape[0] carbon_boxes_shape = (nt, a.shape[0]) thermal_boxes_shape = (nt, d.shape[0]) elif type(other_rf) is np.ndarray: if other_rf.ndim != 1: raise ValueError( "In CO2-only mode, other_rf should be a 1D array") nt = other_rf.shape[0] carbon_boxes_shape = (nt, a.shape[0]) thermal_boxes_shape = (nt, d.shape[0]) emissions = np.zeros(nt) else: raise ValueError( "Neither emissions or other_rf is defined as a timeseries") else: if type(C) is not np.ndarray or C.ndim != 1: raise ValueError( "In CO2-only mode, concentrations should be a 1D array") nt = C.shape[0] thermal_boxes_shape = (nt, d.shape[0]) # expand C to 2D array for consistency with other calcs C = C.reshape((nt, 1)) # check scale factor is correct shape - either scalar or 1D # needs try/except really if scale is None: scale = np.ones(nt) elif np.isscalar(scale): scale = np.ones(nt) * scale elif scale.ndim==1 and scale.shape[0]==nt: pass else: raise ValueError("in CO2-only mode, scale should be None, a "+ "scalar or a (nt,) array") # if scaling the historical time series to match AR5, apply these # factors to whatever the user specifies if scaleHistoricalAR5: scale=scale*historical_scaling.co2[:nt] # If TCR and ECS are supplied, calculate q coefficients if type(tcrecs) is np.ndarray: q = calculate_q(tcrecs, d, F2x, tcr_dbl, nt) # Check a and tau are same size if a.ndim != 1: raise ValueError("a should be a 1D array") if tau.ndim != 1: raise ValueError("tau should be a 1D array") if len(a) != len(tau): raise ValueError("a and tau should be the same size") if not np.isclose(np.sum(a), 1.0): raise ValueError("a should sum to one") # Allocate intermediate and output arrays F = np.zeros((nt, nF)) C_acc = np.zeros(nt) T_j = np.zeros(thermal_boxes_shape) T = np.zeros(nt) C_0 = np.copy(C_pi) if emissions_driven: C = np.zeros((nt, ngas)) R_i = np.zeros(carbon_boxes_shape) if restart_in: R_minus1 = restart_in[0] T_j_minus1 = restart_in[1] C_acc_minus1 = restart_in[2] E_minus1 = restart_in[3] C_minus1 = np.sum(R_minus1,axis=-1) + C_0[0] C[0,0], C_acc[0], R_i[0,:], time_scale_sf = carbon_cycle( E_minus1, C_acc_minus1, np.sum(T_j_minus1), r0, rc, rt, iirf_max, 0.16, a, tau, iirf_h, R_minus1, C_pi[0], C_minus1, emissions[0] ) if np.isscalar(other_rf): F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf else: F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf[0] F[0,0] = F[0,0] * scale[0] T_j[0,:] = forc_to_temp(T_j_minus1, q[0,:], d, F[0,:]) T[0]=np.sum(T_j[0,:],axis=-1) else: # Initialise the carbon pools to be correct for first timestep in # numerical method if emissions_driven: if useMultigas: R_i[0,:] = a * (np.sum(emissions[0,1:3])) / ppm_gtc C[0,1:] = C_0[1:] else: R_i[0,:] = a * emissions[0,np.newaxis] / ppm_gtc C[0,0] = np.sum(R_i[0,:],axis=-1) + C_0[0] if useMultigas: # CO2, CH4 and N2O are co-dependent F[0,0:3] = ghg(C[0,0:3], C_pi[0:3], F2x=F2x) # Minor (F- and H-gases) are linear in 
concentration # the factor of 0.001 here is because radiative efficiencies are given # in W/m2/ppb and concentrations of minor gases are in ppt. F[0,3] = np.sum((C[0,3:] - C_pi[3:]) * radeff.aslist[3:] * 0.001) # Tropospheric ozone: if emissions_driven: if useStevenson: F[0,4] = ozone_tr.stevenson(emissions[0,:], C[0,1], T=np.sum(T_j[0,:]), feedback=useTropO3TFeedback, fix_pre1850_RCP=fixPre1850RCP) else: F[0,4] = ozone_tr.regress(emissions[0,:], beta=b_tro3) else: F[:,4] = F_tropO3 # Stratospheric ozone depends on concentrations of ODSs (index 15-30) F[0,5] = ozone_st.magicc(C[0,15:], C_pi[15:]) # Stratospheric water vapour is a function of the methane ERF F[0,6] = h2o_st.linear(F[0,1], ratio=stwv_from_ch4) # Forcing from contrails. No climate feedback so can live outside # of forward model in this version if emissions_driven: if contrail_forcing.lower()[0]=='n': # from NOx emissions F[:,7] = contrails.from_aviNOx(emissions, aviNOx_frac) elif contrail_forcing.lower()[0]=='f': # from kerosene production F[:,7] = contrails.from_fuel(kerosene_supply) elif contrail_forcing.lower()[0]=='e': # external forcing timeseries F[:,7] = F_contrails else: raise ValueError("contrails must be one of 'NOx' (estimated "+ "from NOx emissions), 'fuel' (estimated from annual jet fuel "+ "supplied) or 'external' (an external forcing time series).") else: F[:,7] = F_contrails # Forcing from aerosols - again no feedback dependence if emissions_driven: if aerosol_forcing.lower()=='stevens': F[:,8] = aerosols.Stevens(emissions, stevens_params=stevens_params) elif 'aerocom' in aerosol_forcing.lower(): F[:,8] = aerosols.aerocom_direct(emissions, beta=b_aero) if 'ghan' in aerosol_forcing.lower(): F[:,8] = F[:,8] + aerosols.ghan_indirect(emissions, scale_AR5=scaleAerosolAR5, fix_pre1850_RCP=fixPre1850RCP, ghan_params=ghan_params) elif aerosol_forcing.lower()[0] == 'e': F[:,8] = F_aerosol else: raise ValueError("aerosol_forcing should be one of 'stevens', " + "aerocom, aerocom+ghan or external") else: F[:,8] = F_aerosol # Black carbon on snow - no feedback dependence if emissions_driven: F[:,9] = bc_snow.linear(emissions) else: F[:,9] = F_bcsnow # Land use change - either use a scaling with cumulative CO2 emissions # or an external time series if emissions_driven: if landuse_forcing.lower()[0]=='c': F[:,10] = landuse.cumulative(emissions) elif landuse_forcing.lower()[0]=='e': F[:,10] = F_landuse else: raise ValueError( "landuse_forcing should be one of 'co2' or 'external'") else: F[:,10] = F_landuse # Volcanic and solar copied straight to the output arrays F[:,11] = F_volcanic F[:,12] = F_solar # multiply by scale factors F[0,:] = F[0,:] * scale[0,:] else: if np.isscalar(other_rf): F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf else: F[0,0] = co2_log(C[0,0], C_pi[0], F2x) + other_rf[0] F[0,0] = F[0,0] * scale[0] if restart_in == False: # Update the thermal response boxes T_j[0,:] = (q[0,:]/d)*(np.sum(F[0,:])) # Sum the thermal response boxes to get the total temperature anomaly T[0]=np.sum(T_j[0,:],axis=-1) for t in range(1,nt): if emissions_driven: if useMultigas: if t == 1: time_scale_sf = 0.16 # Calculate concentrations # a. 
CARBON DIOXIDE # Firstly add any oxidised methane from last year to the CO2 # pool oxidised_CH4 = ((C[t-1,1]-C_pi[1]) * (1.0 - np.exp(-1.0/lifetimes[1])) * (molwt.C/molwt.CH4 * 0.001 * oxCH4_frac * fossilCH4_frac[t])) oxidised_CH4 = np.max((oxidised_CH4, 0)) C[t,0], C_acc[t], R_i[t,:], time_scale_sf = carbon_cycle( np.sum(emissions[t-1,1:3]), C_acc[t-1], T[t-1], r0, rc, rt, iirf_max, time_scale_sf, a, tau, iirf_h, R_i[t-1,:] + oxidised_CH4, C_pi[0], C[t-1,0], np.sum(emissions[t,1:3]) ) # b. METHANE C[t,1] = emis_to_conc( C[t-1,1], emissions[t-1,3]+natural[t,0], emissions[t,3]+natural[t,0], 1.0, lifetimes[1], 1.0/emis2conc[1] ) # c. NITROUS OXIDE C[t,2] = emis_to_conc( C[t-1,2], emissions[t-1,4]+natural[t,1], emissions[t,4]+natural[t,1], 1.0, lifetimes[2], 1.0/emis2conc[2] ) # d. OTHER WMGHGs C[t,3:] = emis_to_conc( C[t-1,3:], emissions[t-1,12:], emissions[t,12:], 1.0, np.array(lifetimes[3:]), 1.0/emis2conc[3:] ) # 2. Radiative forcing F[t,0:3] = ghg(C[t,0:3], C_pi[0:3], F2x=F2x) F[t,3] = np.sum((C[t,3:] - C_pi[3:]) * radeff.aslist[3:] * 0.001) if useStevenson: F[t,4] = ozone_tr.stevenson(emissions[t,:], C[t,1], T=T[t-1], feedback=useTropO3TFeedback, fix_pre1850_RCP=fixPre1850RCP) else: F[t,4] = ozone_tr.regress(emissions[t,:], beta=b_tro3) F[t,5] = ozone_st.magicc(C[t,15:], C_pi[15:]) F[t,6] = h2o_st.linear(F[t,1], ratio=stwv_from_ch4) # multiply by scale factors F[t,:] = F[t,:] * scale[t,:] # 3. Temperature # Update the thermal response boxes T_j[t,:] = forc_to_temp( T_j[t-1,:], q[t,:], d, F[t,:], e=efficacy) # Sum the thermal response boxes to get the total temperature T[t]=np.sum(T_j[t,:],axis=-1) else: if t == 1: time_scale_sf = 0.16 C[t,0], C_acc[t], R_i[t,:], time_scale_sf = carbon_cycle( emissions[t-1], C_acc[t-1], T[t-1], r0, rc, rt, iirf_max, time_scale_sf, a, tau, iirf_h, R_i[t-1,:], C_pi[0], C[t-1,0], emissions[t] ) if np.isscalar(other_rf): F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf else: F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf[t] F[t,0] = F[t,0] * scale[t] T_j[t,:] = forc_to_temp(T_j[t-1,:], q[t,:], d, F[t,:]) T[t]=np.sum(T_j[t,:],axis=-1) else: if useMultigas: F[t,0:3] = ghg(C[t,0:3], C_pi[0:3], F2x=F2x) F[t,3] = np.sum((C[t,3:] - C_pi[3:]) * radeff.aslist[3:] * 0.001) F[t,5] = ozone_st.magicc(C[t,15:], C_pi[15:]) F[t,6] = h2o_st.linear(F[t,1], ratio=stwv_from_ch4) # multiply by scale factors F[t,:] = F[t,:] * scale[t,:] # 3. Temperature # Update the thermal response boxes T_j[t,:] = T_j[t,:] = forc_to_temp( T_j[t-1,:], q[t,:], d, F[t,:], e=efficacy) # Sum the thermal response boxes to get the total temperature T[t]=np.sum(T_j[t,:],axis=-1) else: if np.isscalar(other_rf): F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf else: F[t,0] = co2_log(C[t,0], C_pi[0], F2x) + other_rf[t] F[t,0] = F[t,0] * scale[t] T_j[t,:] = forc_to_temp(T_j[t-1,:], q[t,:], d, F[t,:]) T[t]=np.sum(T_j[t,:],axis=-1) if not useMultigas: C = np.squeeze(C) F = np.squeeze(F) if restart_out: if useMultigas: E_minus1 = np.sum(emissions[-1,1:3]) else: E_minus1 = emissions[-1] restart_out_val=(R_i[-1],T_j[-1],C_acc[-1],E_minus1) return C, F, T, restart_out_val else: return C, F, T
true
true
f7248131aa87bcae1465dd9f6fc9da5036cc235d
29,226
py
Python
absl/flags/tests/_validators_test.py
mcx/abseil-py
58ead8c22230a2493006fa0ab9f76776b6e7280f
[ "Apache-2.0" ]
1,969
2017-04-24T22:21:29.000Z
2022-03-30T13:27:09.000Z
absl/flags/tests/_validators_test.py
mcx/abseil-py
58ead8c22230a2493006fa0ab9f76776b6e7280f
[ "Apache-2.0" ]
111
2017-09-27T05:45:53.000Z
2022-03-29T16:48:49.000Z
absl/flags/tests/_validators_test.py
mcx/abseil-py
58ead8c22230a2493006fa0ab9f76776b6e7280f
[ "Apache-2.0" ]
240
2017-09-26T01:18:10.000Z
2022-03-31T06:24:40.000Z
# Copyright 2017 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing that flags validators framework does work. This file tests that each flag validator called when it should be, and that failed validator will throw an exception, etc. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import warnings from absl.flags import _defines from absl.flags import _exceptions from absl.flags import _flagvalues from absl.flags import _validators from absl.testing import absltest class SingleFlagValidatorTest(absltest.TestCase): """Testing _validators.register_validator() method.""" def setUp(self): super(SingleFlagValidatorTest, self).setUp() self.flag_values = _flagvalues.FlagValues() self.call_args = [] def test_success(self): def checker(x): self.call_args.append(x) return True _defines.DEFINE_integer( 'test_flag', None, 'Usual integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program',) self.flag_values(argv) self.assertIsNone(self.flag_values.test_flag) self.flag_values.test_flag = 2 self.assertEqual(2, self.flag_values.test_flag) self.assertEqual([None, 2], self.call_args) def test_default_value_not_used_success(self): def checker(x): self.call_args.append(x) return True _defines.DEFINE_integer( 'test_flag', None, 'Usual integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program', '--test_flag=1') self.flag_values(argv) self.assertEqual(1, self.flag_values.test_flag) self.assertEqual([1], self.call_args) def test_validator_not_called_when_other_flag_is_changed(self): def checker(x): self.call_args.append(x) return True _defines.DEFINE_integer( 'test_flag', 1, 'Usual integer flag', flag_values=self.flag_values) _defines.DEFINE_integer( 'other_flag', 2, 'Other integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program',) self.flag_values(argv) self.assertEqual(1, self.flag_values.test_flag) self.flag_values.other_flag = 3 self.assertEqual([1], self.call_args) def test_exception_raised_if_checker_fails(self): def checker(x): self.call_args.append(x) return x == 1 _defines.DEFINE_integer( 'test_flag', None, 'Usual integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program', '--test_flag=1') self.flag_values(argv) with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.test_flag = 2 self.assertEqual('flag --test_flag=2: Errors happen', str(cm.exception)) self.assertEqual([1, 2], self.call_args) def test_exception_raised_if_checker_raises_exception(self): def checker(x): self.call_args.append(x) if x == 1: return True raise 
_exceptions.ValidationError('Specific message') _defines.DEFINE_integer( 'test_flag', None, 'Usual integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program', '--test_flag=1') self.flag_values(argv) with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.test_flag = 2 self.assertEqual('flag --test_flag=2: Specific message', str(cm.exception)) self.assertEqual([1, 2], self.call_args) def test_error_message_when_checker_returns_false_on_start(self): def checker(x): self.call_args.append(x) return False _defines.DEFINE_integer( 'test_flag', None, 'Usual integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program', '--test_flag=1') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values(argv) self.assertEqual('flag --test_flag=1: Errors happen', str(cm.exception)) self.assertEqual([1], self.call_args) def test_error_message_when_checker_raises_exception_on_start(self): def checker(x): self.call_args.append(x) raise _exceptions.ValidationError('Specific message') _defines.DEFINE_integer( 'test_flag', None, 'Usual integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program', '--test_flag=1') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values(argv) self.assertEqual('flag --test_flag=1: Specific message', str(cm.exception)) self.assertEqual([1], self.call_args) def test_validators_checked_in_order(self): def required(x): self.calls.append('required') return x is not None def even(x): self.calls.append('even') return x % 2 == 0 self.calls = [] self._define_flag_and_validators(required, even) self.assertEqual(['required', 'even'], self.calls) self.calls = [] self._define_flag_and_validators(even, required) self.assertEqual(['even', 'required'], self.calls) def _define_flag_and_validators(self, first_validator, second_validator): local_flags = _flagvalues.FlagValues() _defines.DEFINE_integer( 'test_flag', 2, 'test flag', flag_values=local_flags) _validators.register_validator( 'test_flag', first_validator, message='', flag_values=local_flags) _validators.register_validator( 'test_flag', second_validator, message='', flag_values=local_flags) argv = ('./program',) local_flags(argv) def test_validator_as_decorator(self): _defines.DEFINE_integer( 'test_flag', None, 'Simple integer flag', flag_values=self.flag_values) @_validators.validator('test_flag', flag_values=self.flag_values) def checker(x): self.call_args.append(x) return True argv = ('./program',) self.flag_values(argv) self.assertIsNone(self.flag_values.test_flag) self.flag_values.test_flag = 2 self.assertEqual(2, self.flag_values.test_flag) self.assertEqual([None, 2], self.call_args) # Check that 'Checker' is still a function and has not been replaced. 
self.assertTrue(checker(3)) self.assertEqual([None, 2, 3], self.call_args) class MultiFlagsValidatorTest(absltest.TestCase): """Test flags multi-flag validators.""" def setUp(self): super(MultiFlagsValidatorTest, self).setUp() self.flag_values = _flagvalues.FlagValues() self.call_args = [] _defines.DEFINE_integer( 'foo', 1, 'Usual integer flag', flag_values=self.flag_values) _defines.DEFINE_integer( 'bar', 2, 'Usual integer flag', flag_values=self.flag_values) def test_success(self): def checker(flags_dict): self.call_args.append(flags_dict) return True _validators.register_multi_flags_validator( ['foo', 'bar'], checker, flag_values=self.flag_values) argv = ('./program', '--bar=2') self.flag_values(argv) self.assertEqual(1, self.flag_values.foo) self.assertEqual(2, self.flag_values.bar) self.assertEqual([{'foo': 1, 'bar': 2}], self.call_args) self.flag_values.foo = 3 self.assertEqual(3, self.flag_values.foo) self.assertEqual([{'foo': 1, 'bar': 2}, {'foo': 3, 'bar': 2}], self.call_args) def test_validator_not_called_when_other_flag_is_changed(self): def checker(flags_dict): self.call_args.append(flags_dict) return True _defines.DEFINE_integer( 'other_flag', 3, 'Other integer flag', flag_values=self.flag_values) _validators.register_multi_flags_validator( ['foo', 'bar'], checker, flag_values=self.flag_values) argv = ('./program',) self.flag_values(argv) self.flag_values.other_flag = 3 self.assertEqual([{'foo': 1, 'bar': 2}], self.call_args) def test_exception_raised_if_checker_fails(self): def checker(flags_dict): self.call_args.append(flags_dict) values = flags_dict.values() # Make sure all the flags have different values. return len(set(values)) == len(values) _validators.register_multi_flags_validator( ['foo', 'bar'], checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program',) self.flag_values(argv) with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.bar = 1 self.assertEqual('flags foo=1, bar=1: Errors happen', str(cm.exception)) self.assertEqual([{'foo': 1, 'bar': 2}, {'foo': 1, 'bar': 1}], self.call_args) def test_exception_raised_if_checker_raises_exception(self): def checker(flags_dict): self.call_args.append(flags_dict) values = flags_dict.values() # Make sure all the flags have different values. if len(set(values)) != len(values): raise _exceptions.ValidationError('Specific message') return True _validators.register_multi_flags_validator( ['foo', 'bar'], checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program',) self.flag_values(argv) with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.bar = 1 self.assertEqual('flags foo=1, bar=1: Specific message', str(cm.exception)) self.assertEqual([{'foo': 1, 'bar': 2}, {'foo': 1, 'bar': 1}], self.call_args) def test_decorator(self): @_validators.multi_flags_validator( ['foo', 'bar'], message='Errors happen', flag_values=self.flag_values) def checker(flags_dict): # pylint: disable=unused-variable self.call_args.append(flags_dict) values = flags_dict.values() # Make sure all the flags have different values. 
return len(set(values)) == len(values) argv = ('./program',) self.flag_values(argv) with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.bar = 1 self.assertEqual('flags foo=1, bar=1: Errors happen', str(cm.exception)) self.assertEqual([{'foo': 1, 'bar': 2}, {'foo': 1, 'bar': 1}], self.call_args) class MarkFlagsAsMutualExclusiveTest(absltest.TestCase): def setUp(self): super(MarkFlagsAsMutualExclusiveTest, self).setUp() self.flag_values = _flagvalues.FlagValues() _defines.DEFINE_string( 'flag_one', None, 'flag one', flag_values=self.flag_values) _defines.DEFINE_string( 'flag_two', None, 'flag two', flag_values=self.flag_values) _defines.DEFINE_string( 'flag_three', None, 'flag three', flag_values=self.flag_values) _defines.DEFINE_integer( 'int_flag_one', None, 'int flag one', flag_values=self.flag_values) _defines.DEFINE_integer( 'int_flag_two', None, 'int flag two', flag_values=self.flag_values) _defines.DEFINE_multi_string( 'multi_flag_one', None, 'multi flag one', flag_values=self.flag_values) _defines.DEFINE_multi_string( 'multi_flag_two', None, 'multi flag two', flag_values=self.flag_values) _defines.DEFINE_boolean( 'flag_not_none', False, 'false default', flag_values=self.flag_values) def _mark_flags_as_mutually_exclusive(self, flag_names, required): _validators.mark_flags_as_mutual_exclusive( flag_names, required=required, flag_values=self.flag_values) def test_no_flags_present(self): self._mark_flags_as_mutually_exclusive(['flag_one', 'flag_two'], False) argv = ('./program',) self.flag_values(argv) self.assertIsNone(self.flag_values.flag_one) self.assertIsNone(self.flag_values.flag_two) def test_no_flags_present_required(self): self._mark_flags_as_mutually_exclusive(['flag_one', 'flag_two'], True) argv = ('./program',) expected = ( 'flags flag_one=None, flag_two=None: ' 'Exactly one of (flag_one, flag_two) must have a value other than ' 'None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_one_flag_present(self): self._mark_flags_as_mutually_exclusive(['flag_one', 'flag_two'], False) self.flag_values(('./program', '--flag_one=1')) self.assertEqual('1', self.flag_values.flag_one) def test_one_flag_present_required(self): self._mark_flags_as_mutually_exclusive(['flag_one', 'flag_two'], True) self.flag_values(('./program', '--flag_two=2')) self.assertEqual('2', self.flag_values.flag_two) def test_one_flag_zero_required(self): self._mark_flags_as_mutually_exclusive( ['int_flag_one', 'int_flag_two'], True) self.flag_values(('./program', '--int_flag_one=0')) self.assertEqual(0, self.flag_values.int_flag_one) def test_mutual_exclusion_with_extra_flags(self): self._mark_flags_as_mutually_exclusive(['flag_one', 'flag_two'], True) argv = ('./program', '--flag_two=2', '--flag_three=3') self.flag_values(argv) self.assertEqual('2', self.flag_values.flag_two) self.assertEqual('3', self.flag_values.flag_three) def test_mutual_exclusion_with_zero(self): self._mark_flags_as_mutually_exclusive( ['int_flag_one', 'int_flag_two'], False) argv = ('./program', '--int_flag_one=0', '--int_flag_two=0') expected = ( 'flags int_flag_one=0, int_flag_two=0: ' 'At most one of (int_flag_one, int_flag_two) must have a value other ' 'than None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_multiple_flags_present(self): self._mark_flags_as_mutually_exclusive( ['flag_one', 'flag_two', 'flag_three'], False) argv = ('./program', '--flag_one=1', 
'--flag_two=2', '--flag_three=3') expected = ( 'flags flag_one=1, flag_two=2, flag_three=3: ' 'At most one of (flag_one, flag_two, flag_three) must have a value ' 'other than None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_multiple_flags_present_required(self): self._mark_flags_as_mutually_exclusive( ['flag_one', 'flag_two', 'flag_three'], True) argv = ('./program', '--flag_one=1', '--flag_two=2', '--flag_three=3') expected = ( 'flags flag_one=1, flag_two=2, flag_three=3: ' 'Exactly one of (flag_one, flag_two, flag_three) must have a value ' 'other than None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_no_multiflags_present(self): self._mark_flags_as_mutually_exclusive( ['multi_flag_one', 'multi_flag_two'], False) argv = ('./program',) self.flag_values(argv) self.assertIsNone(self.flag_values.multi_flag_one) self.assertIsNone(self.flag_values.multi_flag_two) def test_no_multistring_flags_present_required(self): self._mark_flags_as_mutually_exclusive( ['multi_flag_one', 'multi_flag_two'], True) argv = ('./program',) expected = ( 'flags multi_flag_one=None, multi_flag_two=None: ' 'Exactly one of (multi_flag_one, multi_flag_two) must have a value ' 'other than None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_one_multiflag_present(self): self._mark_flags_as_mutually_exclusive( ['multi_flag_one', 'multi_flag_two'], True) self.flag_values(('./program', '--multi_flag_one=1')) self.assertEqual(['1'], self.flag_values.multi_flag_one) def test_one_multiflag_present_repeated(self): self._mark_flags_as_mutually_exclusive( ['multi_flag_one', 'multi_flag_two'], True) self.flag_values(('./program', '--multi_flag_one=1', '--multi_flag_one=1b')) self.assertEqual(['1', '1b'], self.flag_values.multi_flag_one) def test_multiple_multiflags_present(self): self._mark_flags_as_mutually_exclusive( ['multi_flag_one', 'multi_flag_two'], False) argv = ('./program', '--multi_flag_one=1', '--multi_flag_two=2') expected = ( "flags multi_flag_one=['1'], multi_flag_two=['2']: " 'At most one of (multi_flag_one, multi_flag_two) must have a value ' 'other than None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_multiple_multiflags_present_required(self): self._mark_flags_as_mutually_exclusive( ['multi_flag_one', 'multi_flag_two'], True) argv = ('./program', '--multi_flag_one=1', '--multi_flag_two=2') expected = ( "flags multi_flag_one=['1'], multi_flag_two=['2']: " 'Exactly one of (multi_flag_one, multi_flag_two) must have a value ' 'other than None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_flag_default_not_none_warning(self): with warnings.catch_warnings(record=True) as caught_warnings: warnings.simplefilter('always') self._mark_flags_as_mutually_exclusive(['flag_one', 'flag_not_none'], False) self.assertLen(caught_warnings, 1) self.assertIn('--flag_not_none has a non-None default value', str(caught_warnings[0].message)) class MarkBoolFlagsAsMutualExclusiveTest(absltest.TestCase): def setUp(self): super(MarkBoolFlagsAsMutualExclusiveTest, self).setUp() self.flag_values = _flagvalues.FlagValues() _defines.DEFINE_boolean( 'false_1', False, 'default false 1', flag_values=self.flag_values) _defines.DEFINE_boolean( 'false_2', False, 'default false 2', flag_values=self.flag_values) 
_defines.DEFINE_boolean( 'true_1', True, 'default true 1', flag_values=self.flag_values) _defines.DEFINE_integer( 'non_bool', None, 'non bool', flag_values=self.flag_values) def _mark_bool_flags_as_mutually_exclusive(self, flag_names, required): _validators.mark_bool_flags_as_mutual_exclusive( flag_names, required=required, flag_values=self.flag_values) def test_no_flags_present(self): self._mark_bool_flags_as_mutually_exclusive(['false_1', 'false_2'], False) self.flag_values(('./program',)) self.assertEqual(False, self.flag_values.false_1) self.assertEqual(False, self.flag_values.false_2) def test_no_flags_present_required(self): self._mark_bool_flags_as_mutually_exclusive(['false_1', 'false_2'], True) argv = ('./program',) expected = ( 'flags false_1=False, false_2=False: ' 'Exactly one of (false_1, false_2) must be True.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_no_flags_present_with_default_true_required(self): self._mark_bool_flags_as_mutually_exclusive(['false_1', 'true_1'], True) self.flag_values(('./program',)) self.assertEqual(False, self.flag_values.false_1) self.assertEqual(True, self.flag_values.true_1) def test_two_flags_true(self): self._mark_bool_flags_as_mutually_exclusive(['false_1', 'false_2'], False) argv = ('./program', '--false_1', '--false_2') expected = ( 'flags false_1=True, false_2=True: At most one of (false_1, ' 'false_2) must be True.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_non_bool_flag(self): expected = ('Flag --non_bool is not Boolean, which is required for flags ' 'used in mark_bool_flags_as_mutual_exclusive.') with self.assertRaisesWithLiteralMatch(_exceptions.ValidationError, expected): self._mark_bool_flags_as_mutually_exclusive(['false_1', 'non_bool'], False) class MarkFlagAsRequiredTest(absltest.TestCase): def setUp(self): super(MarkFlagAsRequiredTest, self).setUp() self.flag_values = _flagvalues.FlagValues() def test_success(self): _defines.DEFINE_string( 'string_flag', None, 'string flag', flag_values=self.flag_values) _validators.mark_flag_as_required( 'string_flag', flag_values=self.flag_values) argv = ('./program', '--string_flag=value') self.flag_values(argv) self.assertEqual('value', self.flag_values.string_flag) def test_catch_none_as_default(self): _defines.DEFINE_string( 'string_flag', None, 'string flag', flag_values=self.flag_values) _validators.mark_flag_as_required( 'string_flag', flag_values=self.flag_values) argv = ('./program',) expected = ( r'flag --string_flag=None: Flag --string_flag must have a value other ' r'than None\.') with self.assertRaisesRegex(_exceptions.IllegalFlagValueError, expected): self.flag_values(argv) def test_catch_setting_none_after_program_start(self): _defines.DEFINE_string( 'string_flag', 'value', 'string flag', flag_values=self.flag_values) _validators.mark_flag_as_required( 'string_flag', flag_values=self.flag_values) argv = ('./program',) self.flag_values(argv) self.assertEqual('value', self.flag_values.string_flag) expected = ('flag --string_flag=None: Flag --string_flag must have a value ' 'other than None.') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.string_flag = None self.assertEqual(expected, str(cm.exception)) def test_flag_default_not_none_warning(self): _defines.DEFINE_string( 'flag_not_none', '', 'empty default', flag_values=self.flag_values) with warnings.catch_warnings(record=True) as caught_warnings: 
warnings.simplefilter('always') _validators.mark_flag_as_required( 'flag_not_none', flag_values=self.flag_values) self.assertLen(caught_warnings, 1) self.assertIn('--flag_not_none has a non-None default value', str(caught_warnings[0].message)) class MarkFlagsAsRequiredTest(absltest.TestCase): def setUp(self): super(MarkFlagsAsRequiredTest, self).setUp() self.flag_values = _flagvalues.FlagValues() def test_success(self): _defines.DEFINE_string( 'string_flag_1', None, 'string flag 1', flag_values=self.flag_values) _defines.DEFINE_string( 'string_flag_2', None, 'string flag 2', flag_values=self.flag_values) flag_names = ['string_flag_1', 'string_flag_2'] _validators.mark_flags_as_required(flag_names, flag_values=self.flag_values) argv = ('./program', '--string_flag_1=value_1', '--string_flag_2=value_2') self.flag_values(argv) self.assertEqual('value_1', self.flag_values.string_flag_1) self.assertEqual('value_2', self.flag_values.string_flag_2) def test_catch_none_as_default(self): _defines.DEFINE_string( 'string_flag_1', None, 'string flag 1', flag_values=self.flag_values) _defines.DEFINE_string( 'string_flag_2', None, 'string flag 2', flag_values=self.flag_values) _validators.mark_flags_as_required( ['string_flag_1', 'string_flag_2'], flag_values=self.flag_values) argv = ('./program', '--string_flag_1=value_1') expected = ( r'flag --string_flag_2=None: Flag --string_flag_2 must have a value ' r'other than None\.') with self.assertRaisesRegex(_exceptions.IllegalFlagValueError, expected): self.flag_values(argv) def test_catch_setting_none_after_program_start(self): _defines.DEFINE_string( 'string_flag_1', 'value_1', 'string flag 1', flag_values=self.flag_values) _defines.DEFINE_string( 'string_flag_2', 'value_2', 'string flag 2', flag_values=self.flag_values) _validators.mark_flags_as_required( ['string_flag_1', 'string_flag_2'], flag_values=self.flag_values) argv = ('./program', '--string_flag_1=value_1') self.flag_values(argv) self.assertEqual('value_1', self.flag_values.string_flag_1) expected = ( 'flag --string_flag_1=None: Flag --string_flag_1 must have a value ' 'other than None.') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.string_flag_1 = None self.assertEqual(expected, str(cm.exception)) def test_catch_multiple_flags_as_none_at_program_start(self): _defines.DEFINE_float( 'float_flag_1', None, 'string flag 1', flag_values=self.flag_values) _defines.DEFINE_float( 'float_flag_2', None, 'string flag 2', flag_values=self.flag_values) _validators.mark_flags_as_required( ['float_flag_1', 'float_flag_2'], flag_values=self.flag_values) argv = ('./program', '') expected = ( 'flag --float_flag_1=None: Flag --float_flag_1 must have a value ' 'other than None.\n' 'flag --float_flag_2=None: Flag --float_flag_2 must have a value ' 'other than None.') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values(argv) self.assertEqual(expected, str(cm.exception)) def test_fail_fast_single_flag_and_skip_remaining_validators(self): def raise_unexpected_error(x): del x raise _exceptions.ValidationError('Should not be raised.') _defines.DEFINE_float( 'flag_1', None, 'flag 1', flag_values=self.flag_values) _defines.DEFINE_float( 'flag_2', 4.2, 'flag 2', flag_values=self.flag_values) _validators.mark_flag_as_required('flag_1', flag_values=self.flag_values) _validators.register_validator( 'flag_1', raise_unexpected_error, flag_values=self.flag_values) _validators.register_multi_flags_validator(['flag_2', 'flag_1'], raise_unexpected_error, 
flag_values=self.flag_values) argv = ('./program', '') expected = ( 'flag --flag_1=None: Flag --flag_1 must have a value other than None.') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values(argv) self.assertEqual(expected, str(cm.exception)) def test_fail_fast_multi_flag_and_skip_remaining_validators(self): def raise_expected_error(x): del x raise _exceptions.ValidationError('Expected error.') def raise_unexpected_error(x): del x raise _exceptions.ValidationError('Got unexpected error.') _defines.DEFINE_float( 'flag_1', 5.1, 'flag 1', flag_values=self.flag_values) _defines.DEFINE_float( 'flag_2', 10.0, 'flag 2', flag_values=self.flag_values) _validators.register_multi_flags_validator(['flag_1', 'flag_2'], raise_expected_error, flag_values=self.flag_values) _validators.register_multi_flags_validator(['flag_2', 'flag_1'], raise_unexpected_error, flag_values=self.flag_values) _validators.register_validator( 'flag_1', raise_unexpected_error, flag_values=self.flag_values) _validators.register_validator( 'flag_2', raise_unexpected_error, flag_values=self.flag_values) argv = ('./program', '') expected = ('flags flag_1=5.1, flag_2=10.0: Expected error.') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values(argv) self.assertEqual(expected, str(cm.exception)) if __name__ == '__main__': absltest.main()
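The test content above exercises the flag-validator machinery through the internal _defines and _validators modules. Application code normally reaches the same hooks through the public absl.flags package, which re-exports them; a minimal sketch of that public-facing usage, with illustrative flag names rather than anything taken from the record:

# Sketch only: the same single-flag validator and required-flag hooks covered
# by the tests above, reached via the public absl.flags API. Flag names and
# the main() entry point are hypothetical.
from absl import app, flags

FLAGS = flags.FLAGS

flags.DEFINE_integer('num_workers', None, 'Number of worker processes.')
flags.DEFINE_string('output_path', None, 'Where results are written.')

# Raises IllegalFlagValueError at parse time or on later assignment,
# mirroring test_exception_raised_if_checker_fails above.
flags.register_validator(
    'num_workers',
    lambda value: value is None or value > 0,
    message='--num_workers must be a positive integer')

# Public counterpart of the mark_flag_as_required cases tested above.
flags.mark_flag_as_required('output_path')


def main(argv):
  del argv  # Unused.
  print(FLAGS.num_workers, FLAGS.output_path)


if __name__ == '__main__':
  app.run(main)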
39.22953
80
0.685622
from __future__ import absolute_import from __future__ import division from __future__ import print_function import warnings from absl.flags import _defines from absl.flags import _exceptions from absl.flags import _flagvalues from absl.flags import _validators from absl.testing import absltest class SingleFlagValidatorTest(absltest.TestCase): def setUp(self): super(SingleFlagValidatorTest, self).setUp() self.flag_values = _flagvalues.FlagValues() self.call_args = [] def test_success(self): def checker(x): self.call_args.append(x) return True _defines.DEFINE_integer( 'test_flag', None, 'Usual integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program',) self.flag_values(argv) self.assertIsNone(self.flag_values.test_flag) self.flag_values.test_flag = 2 self.assertEqual(2, self.flag_values.test_flag) self.assertEqual([None, 2], self.call_args) def test_default_value_not_used_success(self): def checker(x): self.call_args.append(x) return True _defines.DEFINE_integer( 'test_flag', None, 'Usual integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program', '--test_flag=1') self.flag_values(argv) self.assertEqual(1, self.flag_values.test_flag) self.assertEqual([1], self.call_args) def test_validator_not_called_when_other_flag_is_changed(self): def checker(x): self.call_args.append(x) return True _defines.DEFINE_integer( 'test_flag', 1, 'Usual integer flag', flag_values=self.flag_values) _defines.DEFINE_integer( 'other_flag', 2, 'Other integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program',) self.flag_values(argv) self.assertEqual(1, self.flag_values.test_flag) self.flag_values.other_flag = 3 self.assertEqual([1], self.call_args) def test_exception_raised_if_checker_fails(self): def checker(x): self.call_args.append(x) return x == 1 _defines.DEFINE_integer( 'test_flag', None, 'Usual integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program', '--test_flag=1') self.flag_values(argv) with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.test_flag = 2 self.assertEqual('flag --test_flag=2: Errors happen', str(cm.exception)) self.assertEqual([1, 2], self.call_args) def test_exception_raised_if_checker_raises_exception(self): def checker(x): self.call_args.append(x) if x == 1: return True raise _exceptions.ValidationError('Specific message') _defines.DEFINE_integer( 'test_flag', None, 'Usual integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program', '--test_flag=1') self.flag_values(argv) with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.test_flag = 2 self.assertEqual('flag --test_flag=2: Specific message', str(cm.exception)) self.assertEqual([1, 2], self.call_args) def test_error_message_when_checker_returns_false_on_start(self): def checker(x): self.call_args.append(x) return False _defines.DEFINE_integer( 'test_flag', None, 'Usual integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', 
flag_values=self.flag_values) argv = ('./program', '--test_flag=1') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values(argv) self.assertEqual('flag --test_flag=1: Errors happen', str(cm.exception)) self.assertEqual([1], self.call_args) def test_error_message_when_checker_raises_exception_on_start(self): def checker(x): self.call_args.append(x) raise _exceptions.ValidationError('Specific message') _defines.DEFINE_integer( 'test_flag', None, 'Usual integer flag', flag_values=self.flag_values) _validators.register_validator( 'test_flag', checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program', '--test_flag=1') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values(argv) self.assertEqual('flag --test_flag=1: Specific message', str(cm.exception)) self.assertEqual([1], self.call_args) def test_validators_checked_in_order(self): def required(x): self.calls.append('required') return x is not None def even(x): self.calls.append('even') return x % 2 == 0 self.calls = [] self._define_flag_and_validators(required, even) self.assertEqual(['required', 'even'], self.calls) self.calls = [] self._define_flag_and_validators(even, required) self.assertEqual(['even', 'required'], self.calls) def _define_flag_and_validators(self, first_validator, second_validator): local_flags = _flagvalues.FlagValues() _defines.DEFINE_integer( 'test_flag', 2, 'test flag', flag_values=local_flags) _validators.register_validator( 'test_flag', first_validator, message='', flag_values=local_flags) _validators.register_validator( 'test_flag', second_validator, message='', flag_values=local_flags) argv = ('./program',) local_flags(argv) def test_validator_as_decorator(self): _defines.DEFINE_integer( 'test_flag', None, 'Simple integer flag', flag_values=self.flag_values) @_validators.validator('test_flag', flag_values=self.flag_values) def checker(x): self.call_args.append(x) return True argv = ('./program',) self.flag_values(argv) self.assertIsNone(self.flag_values.test_flag) self.flag_values.test_flag = 2 self.assertEqual(2, self.flag_values.test_flag) self.assertEqual([None, 2], self.call_args) self.assertTrue(checker(3)) self.assertEqual([None, 2, 3], self.call_args) class MultiFlagsValidatorTest(absltest.TestCase): def setUp(self): super(MultiFlagsValidatorTest, self).setUp() self.flag_values = _flagvalues.FlagValues() self.call_args = [] _defines.DEFINE_integer( 'foo', 1, 'Usual integer flag', flag_values=self.flag_values) _defines.DEFINE_integer( 'bar', 2, 'Usual integer flag', flag_values=self.flag_values) def test_success(self): def checker(flags_dict): self.call_args.append(flags_dict) return True _validators.register_multi_flags_validator( ['foo', 'bar'], checker, flag_values=self.flag_values) argv = ('./program', '--bar=2') self.flag_values(argv) self.assertEqual(1, self.flag_values.foo) self.assertEqual(2, self.flag_values.bar) self.assertEqual([{'foo': 1, 'bar': 2}], self.call_args) self.flag_values.foo = 3 self.assertEqual(3, self.flag_values.foo) self.assertEqual([{'foo': 1, 'bar': 2}, {'foo': 3, 'bar': 2}], self.call_args) def test_validator_not_called_when_other_flag_is_changed(self): def checker(flags_dict): self.call_args.append(flags_dict) return True _defines.DEFINE_integer( 'other_flag', 3, 'Other integer flag', flag_values=self.flag_values) _validators.register_multi_flags_validator( ['foo', 'bar'], checker, flag_values=self.flag_values) argv = ('./program',) self.flag_values(argv) self.flag_values.other_flag = 3 
self.assertEqual([{'foo': 1, 'bar': 2}], self.call_args) def test_exception_raised_if_checker_fails(self): def checker(flags_dict): self.call_args.append(flags_dict) values = flags_dict.values() return len(set(values)) == len(values) _validators.register_multi_flags_validator( ['foo', 'bar'], checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program',) self.flag_values(argv) with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.bar = 1 self.assertEqual('flags foo=1, bar=1: Errors happen', str(cm.exception)) self.assertEqual([{'foo': 1, 'bar': 2}, {'foo': 1, 'bar': 1}], self.call_args) def test_exception_raised_if_checker_raises_exception(self): def checker(flags_dict): self.call_args.append(flags_dict) values = flags_dict.values() if len(set(values)) != len(values): raise _exceptions.ValidationError('Specific message') return True _validators.register_multi_flags_validator( ['foo', 'bar'], checker, message='Errors happen', flag_values=self.flag_values) argv = ('./program',) self.flag_values(argv) with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.bar = 1 self.assertEqual('flags foo=1, bar=1: Specific message', str(cm.exception)) self.assertEqual([{'foo': 1, 'bar': 2}, {'foo': 1, 'bar': 1}], self.call_args) def test_decorator(self): @_validators.multi_flags_validator( ['foo', 'bar'], message='Errors happen', flag_values=self.flag_values) def checker(flags_dict): self.call_args.append(flags_dict) values = flags_dict.values() return len(set(values)) == len(values) argv = ('./program',) self.flag_values(argv) with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.bar = 1 self.assertEqual('flags foo=1, bar=1: Errors happen', str(cm.exception)) self.assertEqual([{'foo': 1, 'bar': 2}, {'foo': 1, 'bar': 1}], self.call_args) class MarkFlagsAsMutualExclusiveTest(absltest.TestCase): def setUp(self): super(MarkFlagsAsMutualExclusiveTest, self).setUp() self.flag_values = _flagvalues.FlagValues() _defines.DEFINE_string( 'flag_one', None, 'flag one', flag_values=self.flag_values) _defines.DEFINE_string( 'flag_two', None, 'flag two', flag_values=self.flag_values) _defines.DEFINE_string( 'flag_three', None, 'flag three', flag_values=self.flag_values) _defines.DEFINE_integer( 'int_flag_one', None, 'int flag one', flag_values=self.flag_values) _defines.DEFINE_integer( 'int_flag_two', None, 'int flag two', flag_values=self.flag_values) _defines.DEFINE_multi_string( 'multi_flag_one', None, 'multi flag one', flag_values=self.flag_values) _defines.DEFINE_multi_string( 'multi_flag_two', None, 'multi flag two', flag_values=self.flag_values) _defines.DEFINE_boolean( 'flag_not_none', False, 'false default', flag_values=self.flag_values) def _mark_flags_as_mutually_exclusive(self, flag_names, required): _validators.mark_flags_as_mutual_exclusive( flag_names, required=required, flag_values=self.flag_values) def test_no_flags_present(self): self._mark_flags_as_mutually_exclusive(['flag_one', 'flag_two'], False) argv = ('./program',) self.flag_values(argv) self.assertIsNone(self.flag_values.flag_one) self.assertIsNone(self.flag_values.flag_two) def test_no_flags_present_required(self): self._mark_flags_as_mutually_exclusive(['flag_one', 'flag_two'], True) argv = ('./program',) expected = ( 'flags flag_one=None, flag_two=None: ' 'Exactly one of (flag_one, flag_two) must have a value other than ' 'None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def 
test_one_flag_present(self): self._mark_flags_as_mutually_exclusive(['flag_one', 'flag_two'], False) self.flag_values(('./program', '--flag_one=1')) self.assertEqual('1', self.flag_values.flag_one) def test_one_flag_present_required(self): self._mark_flags_as_mutually_exclusive(['flag_one', 'flag_two'], True) self.flag_values(('./program', '--flag_two=2')) self.assertEqual('2', self.flag_values.flag_two) def test_one_flag_zero_required(self): self._mark_flags_as_mutually_exclusive( ['int_flag_one', 'int_flag_two'], True) self.flag_values(('./program', '--int_flag_one=0')) self.assertEqual(0, self.flag_values.int_flag_one) def test_mutual_exclusion_with_extra_flags(self): self._mark_flags_as_mutually_exclusive(['flag_one', 'flag_two'], True) argv = ('./program', '--flag_two=2', '--flag_three=3') self.flag_values(argv) self.assertEqual('2', self.flag_values.flag_two) self.assertEqual('3', self.flag_values.flag_three) def test_mutual_exclusion_with_zero(self): self._mark_flags_as_mutually_exclusive( ['int_flag_one', 'int_flag_two'], False) argv = ('./program', '--int_flag_one=0', '--int_flag_two=0') expected = ( 'flags int_flag_one=0, int_flag_two=0: ' 'At most one of (int_flag_one, int_flag_two) must have a value other ' 'than None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_multiple_flags_present(self): self._mark_flags_as_mutually_exclusive( ['flag_one', 'flag_two', 'flag_three'], False) argv = ('./program', '--flag_one=1', '--flag_two=2', '--flag_three=3') expected = ( 'flags flag_one=1, flag_two=2, flag_three=3: ' 'At most one of (flag_one, flag_two, flag_three) must have a value ' 'other than None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_multiple_flags_present_required(self): self._mark_flags_as_mutually_exclusive( ['flag_one', 'flag_two', 'flag_three'], True) argv = ('./program', '--flag_one=1', '--flag_two=2', '--flag_three=3') expected = ( 'flags flag_one=1, flag_two=2, flag_three=3: ' 'Exactly one of (flag_one, flag_two, flag_three) must have a value ' 'other than None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_no_multiflags_present(self): self._mark_flags_as_mutually_exclusive( ['multi_flag_one', 'multi_flag_two'], False) argv = ('./program',) self.flag_values(argv) self.assertIsNone(self.flag_values.multi_flag_one) self.assertIsNone(self.flag_values.multi_flag_two) def test_no_multistring_flags_present_required(self): self._mark_flags_as_mutually_exclusive( ['multi_flag_one', 'multi_flag_two'], True) argv = ('./program',) expected = ( 'flags multi_flag_one=None, multi_flag_two=None: ' 'Exactly one of (multi_flag_one, multi_flag_two) must have a value ' 'other than None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_one_multiflag_present(self): self._mark_flags_as_mutually_exclusive( ['multi_flag_one', 'multi_flag_two'], True) self.flag_values(('./program', '--multi_flag_one=1')) self.assertEqual(['1'], self.flag_values.multi_flag_one) def test_one_multiflag_present_repeated(self): self._mark_flags_as_mutually_exclusive( ['multi_flag_one', 'multi_flag_two'], True) self.flag_values(('./program', '--multi_flag_one=1', '--multi_flag_one=1b')) self.assertEqual(['1', '1b'], self.flag_values.multi_flag_one) def test_multiple_multiflags_present(self): self._mark_flags_as_mutually_exclusive( ['multi_flag_one', 
'multi_flag_two'], False) argv = ('./program', '--multi_flag_one=1', '--multi_flag_two=2') expected = ( "flags multi_flag_one=['1'], multi_flag_two=['2']: " 'At most one of (multi_flag_one, multi_flag_two) must have a value ' 'other than None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_multiple_multiflags_present_required(self): self._mark_flags_as_mutually_exclusive( ['multi_flag_one', 'multi_flag_two'], True) argv = ('./program', '--multi_flag_one=1', '--multi_flag_two=2') expected = ( "flags multi_flag_one=['1'], multi_flag_two=['2']: " 'Exactly one of (multi_flag_one, multi_flag_two) must have a value ' 'other than None.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_flag_default_not_none_warning(self): with warnings.catch_warnings(record=True) as caught_warnings: warnings.simplefilter('always') self._mark_flags_as_mutually_exclusive(['flag_one', 'flag_not_none'], False) self.assertLen(caught_warnings, 1) self.assertIn('--flag_not_none has a non-None default value', str(caught_warnings[0].message)) class MarkBoolFlagsAsMutualExclusiveTest(absltest.TestCase): def setUp(self): super(MarkBoolFlagsAsMutualExclusiveTest, self).setUp() self.flag_values = _flagvalues.FlagValues() _defines.DEFINE_boolean( 'false_1', False, 'default false 1', flag_values=self.flag_values) _defines.DEFINE_boolean( 'false_2', False, 'default false 2', flag_values=self.flag_values) _defines.DEFINE_boolean( 'true_1', True, 'default true 1', flag_values=self.flag_values) _defines.DEFINE_integer( 'non_bool', None, 'non bool', flag_values=self.flag_values) def _mark_bool_flags_as_mutually_exclusive(self, flag_names, required): _validators.mark_bool_flags_as_mutual_exclusive( flag_names, required=required, flag_values=self.flag_values) def test_no_flags_present(self): self._mark_bool_flags_as_mutually_exclusive(['false_1', 'false_2'], False) self.flag_values(('./program',)) self.assertEqual(False, self.flag_values.false_1) self.assertEqual(False, self.flag_values.false_2) def test_no_flags_present_required(self): self._mark_bool_flags_as_mutually_exclusive(['false_1', 'false_2'], True) argv = ('./program',) expected = ( 'flags false_1=False, false_2=False: ' 'Exactly one of (false_1, false_2) must be True.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_no_flags_present_with_default_true_required(self): self._mark_bool_flags_as_mutually_exclusive(['false_1', 'true_1'], True) self.flag_values(('./program',)) self.assertEqual(False, self.flag_values.false_1) self.assertEqual(True, self.flag_values.true_1) def test_two_flags_true(self): self._mark_bool_flags_as_mutually_exclusive(['false_1', 'false_2'], False) argv = ('./program', '--false_1', '--false_2') expected = ( 'flags false_1=True, false_2=True: At most one of (false_1, ' 'false_2) must be True.') self.assertRaisesWithLiteralMatch(_exceptions.IllegalFlagValueError, expected, self.flag_values, argv) def test_non_bool_flag(self): expected = ('Flag --non_bool is not Boolean, which is required for flags ' 'used in mark_bool_flags_as_mutual_exclusive.') with self.assertRaisesWithLiteralMatch(_exceptions.ValidationError, expected): self._mark_bool_flags_as_mutually_exclusive(['false_1', 'non_bool'], False) class MarkFlagAsRequiredTest(absltest.TestCase): def setUp(self): super(MarkFlagAsRequiredTest, self).setUp() self.flag_values = _flagvalues.FlagValues() def 
test_success(self): _defines.DEFINE_string( 'string_flag', None, 'string flag', flag_values=self.flag_values) _validators.mark_flag_as_required( 'string_flag', flag_values=self.flag_values) argv = ('./program', '--string_flag=value') self.flag_values(argv) self.assertEqual('value', self.flag_values.string_flag) def test_catch_none_as_default(self): _defines.DEFINE_string( 'string_flag', None, 'string flag', flag_values=self.flag_values) _validators.mark_flag_as_required( 'string_flag', flag_values=self.flag_values) argv = ('./program',) expected = ( r'flag --string_flag=None: Flag --string_flag must have a value other ' r'than None\.') with self.assertRaisesRegex(_exceptions.IllegalFlagValueError, expected): self.flag_values(argv) def test_catch_setting_none_after_program_start(self): _defines.DEFINE_string( 'string_flag', 'value', 'string flag', flag_values=self.flag_values) _validators.mark_flag_as_required( 'string_flag', flag_values=self.flag_values) argv = ('./program',) self.flag_values(argv) self.assertEqual('value', self.flag_values.string_flag) expected = ('flag --string_flag=None: Flag --string_flag must have a value ' 'other than None.') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.string_flag = None self.assertEqual(expected, str(cm.exception)) def test_flag_default_not_none_warning(self): _defines.DEFINE_string( 'flag_not_none', '', 'empty default', flag_values=self.flag_values) with warnings.catch_warnings(record=True) as caught_warnings: warnings.simplefilter('always') _validators.mark_flag_as_required( 'flag_not_none', flag_values=self.flag_values) self.assertLen(caught_warnings, 1) self.assertIn('--flag_not_none has a non-None default value', str(caught_warnings[0].message)) class MarkFlagsAsRequiredTest(absltest.TestCase): def setUp(self): super(MarkFlagsAsRequiredTest, self).setUp() self.flag_values = _flagvalues.FlagValues() def test_success(self): _defines.DEFINE_string( 'string_flag_1', None, 'string flag 1', flag_values=self.flag_values) _defines.DEFINE_string( 'string_flag_2', None, 'string flag 2', flag_values=self.flag_values) flag_names = ['string_flag_1', 'string_flag_2'] _validators.mark_flags_as_required(flag_names, flag_values=self.flag_values) argv = ('./program', '--string_flag_1=value_1', '--string_flag_2=value_2') self.flag_values(argv) self.assertEqual('value_1', self.flag_values.string_flag_1) self.assertEqual('value_2', self.flag_values.string_flag_2) def test_catch_none_as_default(self): _defines.DEFINE_string( 'string_flag_1', None, 'string flag 1', flag_values=self.flag_values) _defines.DEFINE_string( 'string_flag_2', None, 'string flag 2', flag_values=self.flag_values) _validators.mark_flags_as_required( ['string_flag_1', 'string_flag_2'], flag_values=self.flag_values) argv = ('./program', '--string_flag_1=value_1') expected = ( r'flag --string_flag_2=None: Flag --string_flag_2 must have a value ' r'other than None\.') with self.assertRaisesRegex(_exceptions.IllegalFlagValueError, expected): self.flag_values(argv) def test_catch_setting_none_after_program_start(self): _defines.DEFINE_string( 'string_flag_1', 'value_1', 'string flag 1', flag_values=self.flag_values) _defines.DEFINE_string( 'string_flag_2', 'value_2', 'string flag 2', flag_values=self.flag_values) _validators.mark_flags_as_required( ['string_flag_1', 'string_flag_2'], flag_values=self.flag_values) argv = ('./program', '--string_flag_1=value_1') self.flag_values(argv) self.assertEqual('value_1', self.flag_values.string_flag_1) expected = ( 'flag 
--string_flag_1=None: Flag --string_flag_1 must have a value ' 'other than None.') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values.string_flag_1 = None self.assertEqual(expected, str(cm.exception)) def test_catch_multiple_flags_as_none_at_program_start(self): _defines.DEFINE_float( 'float_flag_1', None, 'string flag 1', flag_values=self.flag_values) _defines.DEFINE_float( 'float_flag_2', None, 'string flag 2', flag_values=self.flag_values) _validators.mark_flags_as_required( ['float_flag_1', 'float_flag_2'], flag_values=self.flag_values) argv = ('./program', '') expected = ( 'flag --float_flag_1=None: Flag --float_flag_1 must have a value ' 'other than None.\n' 'flag --float_flag_2=None: Flag --float_flag_2 must have a value ' 'other than None.') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values(argv) self.assertEqual(expected, str(cm.exception)) def test_fail_fast_single_flag_and_skip_remaining_validators(self): def raise_unexpected_error(x): del x raise _exceptions.ValidationError('Should not be raised.') _defines.DEFINE_float( 'flag_1', None, 'flag 1', flag_values=self.flag_values) _defines.DEFINE_float( 'flag_2', 4.2, 'flag 2', flag_values=self.flag_values) _validators.mark_flag_as_required('flag_1', flag_values=self.flag_values) _validators.register_validator( 'flag_1', raise_unexpected_error, flag_values=self.flag_values) _validators.register_multi_flags_validator(['flag_2', 'flag_1'], raise_unexpected_error, flag_values=self.flag_values) argv = ('./program', '') expected = ( 'flag --flag_1=None: Flag --flag_1 must have a value other than None.') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values(argv) self.assertEqual(expected, str(cm.exception)) def test_fail_fast_multi_flag_and_skip_remaining_validators(self): def raise_expected_error(x): del x raise _exceptions.ValidationError('Expected error.') def raise_unexpected_error(x): del x raise _exceptions.ValidationError('Got unexpected error.') _defines.DEFINE_float( 'flag_1', 5.1, 'flag 1', flag_values=self.flag_values) _defines.DEFINE_float( 'flag_2', 10.0, 'flag 2', flag_values=self.flag_values) _validators.register_multi_flags_validator(['flag_1', 'flag_2'], raise_expected_error, flag_values=self.flag_values) _validators.register_multi_flags_validator(['flag_2', 'flag_1'], raise_unexpected_error, flag_values=self.flag_values) _validators.register_validator( 'flag_1', raise_unexpected_error, flag_values=self.flag_values) _validators.register_validator( 'flag_2', raise_unexpected_error, flag_values=self.flag_values) argv = ('./program', '') expected = ('flags flag_1=5.1, flag_2=10.0: Expected error.') with self.assertRaises(_exceptions.IllegalFlagValueError) as cm: self.flag_values(argv) self.assertEqual(expected, str(cm.exception)) if __name__ == '__main__': absltest.main()
true
true
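The same record also covers the multi-flag helpers (register_multi_flags_validator, mark_flags_as_mutual_exclusive, mark_flags_as_required). A hedged sketch of their public absl.flags counterparts, again with hypothetical flag names:

# Sketch only: cross-flag validation and mutual exclusion via the public
# absl.flags API; flag names are hypothetical.
from absl import app, flags

FLAGS = flags.FLAGS

flags.DEFINE_string('input_csv', None, 'Read input from a CSV file.')
flags.DEFINE_string('input_json', None, 'Read input from a JSON file.')
flags.DEFINE_integer('start', 0, 'Start index.')
flags.DEFINE_integer('end', 10, 'End index.')

# Exactly one of the two inputs must be provided, as in the required=True
# mutual-exclusion tests above.
flags.mark_flags_as_mutual_exclusive(['input_csv', 'input_json'], required=True)


# Cross-flag check, the decorator form of register_multi_flags_validator.
@flags.multi_flags_validator(
    ['start', 'end'], message='--start must not exceed --end')
def _check_range(flags_dict):
  return flags_dict['start'] <= flags_dict['end']


def main(argv):
  del argv  # Unused.
  print(FLAGS.input_csv or FLAGS.input_json, FLAGS.start, FLAGS.end)


if __name__ == '__main__':
  app.run(main)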
f72481a010acb7f8d898500cf94f600286f5da8d
64856
py
Python
pybit/__init__.py
leftcoastgeek/pybit
3564bd08a5e34c95a15f8a03b100282ddffd3edf
[ "MIT" ]
null
null
null
pybit/__init__.py
leftcoastgeek/pybit
3564bd08a5e34c95a15f8a03b100282ddffd3edf
[ "MIT" ]
null
null
null
pybit/__init__.py
leftcoastgeek/pybit
3564bd08a5e34c95a15f8a03b100282ddffd3edf
[ "MIT" ]
null
null
null
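The content field that follows holds pybit/__init__.py (VERSION 1.1.18), the Bybit exchange API connector. A hedged usage sketch based only on the constructor and methods defined in that source; the endpoint URL, symbol, and order parameters below follow the Bybit documentation linked from its docstrings and are assumptions for illustration:

# Sketch only: typical use of the HTTP session class defined in the source
# below. Endpoint, symbol and order parameters are illustrative assumptions
# taken from the Bybit docs linked in the docstrings, not from this record.
from pybit import HTTP

session = HTTP(
    endpoint='https://api-testnet.bybit.com',
    api_key='YOUR_API_KEY',
    api_secret='YOUR_API_SECRET')

# Public endpoint: no authentication needed.
orderbook = session.orderbook(symbol='BTCUSD')

# Authenticated endpoint: keyword arguments are passed through to Bybit
# unchanged, so parameter names come from the exchange documentation.
order = session.place_active_order(
    symbol='BTCUSD',
    side='Buy',
    order_type='Limit',
    qty=1,
    price=20000,
    time_in_force='GoodTillCancel')

print(orderbook, order)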
# -*- coding: utf-8 -*- """ pybit ------------------------ pybit is a lightweight and high-performance API connector for the RESTful and WebSocket APIs of the Bybit exchange. Documentation can be found at https://github.com/verata-veritatis/pybit :copyright: (c) 2020-2021 verata-veritatis :license: MIT License """ import time import hmac import json import logging import threading import requests import websocket from datetime import datetime as dt from concurrent.futures import ThreadPoolExecutor from .exceptions import FailedRequestError, InvalidRequestError # Requests will use simplejson if available. try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError # Versioning. VERSION = '1.1.18' class HTTP: """ Connector for Bybit's HTTP API. :param endpoint: The endpoint URL of the HTTP API, e.g. 'https://api-testnet.bybit.com'. :type endpoint: str :param api_key: Your API key. Required for authenticated endpoints. Defaults to None. :type api_key: str :param api_secret: Your API secret key. Required for authenticated endpoints. Defaults to None. :type api_secret: str :param logging_level: The logging level of the built-in logger. Defaults to logging.INFO. Options are CRITICAL (50), ERROR (40), WARNING (30), INFO (20), DEBUG (10), or NOTSET (0). :type logging_level: Union[int, logging.level] :param log_requests: Whether or not pybit should log each HTTP request. :type log_requests: bool :param request_timeout: The timeout of each API request in seconds. Defaults to 10 seconds. :type request_timeout: int :param recv_window: How long an HTTP request is valid in ms. Default is 5000. :type recv_window: int :param force_retry: Whether or not pybit should retry a timed-out request. :type force_retry: bool :param retry_codes: A list of non-fatal status codes to retry on. :type retry_codes: set :param ignore_codes: A list of non-fatal status codes to ignore. :type ignore_codes: set :param max_retries: The number of times to re-attempt a request. :type max_retries: int :param retry_delay: Seconds between retries for returned error or timed-out requests. Default is 3 seconds. :type retry_delay: int :param referral_id: An optional referer ID can be added to each request for identification. :type referral_id: str :returns: pybit.HTTP session. """ def __init__(self, endpoint=None, api_key=None, api_secret=None, logging_level=logging.INFO, log_requests=False, request_timeout=10, recv_window=5000, force_retry=False, retry_codes=None, ignore_codes=None, max_retries=3, retry_delay=3, referral_id=None): """Initializes the HTTP class.""" # Set the endpoint. if endpoint is None: self.endpoint = 'https://api.bybit.com' else: self.endpoint = endpoint # Setup logger. self.logger = logging.getLogger(__name__) if len(logging.root.handlers) == 0: #no handler on root logger set -> we add handler just for this logger to not mess with custom logic from outside handler = logging.StreamHandler() handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S' ) ) handler.setLevel(logging_level) self.logger.addHandler(handler) self.logger.debug('Initializing HTTP session.') self.log_requests = log_requests # Set API keys. self.api_key = api_key self.api_secret = api_secret # Set timeout. self.timeout = request_timeout self.recv_window = recv_window self.force_retry = force_retry self.max_retries = max_retries self.retry_delay = retry_delay # Set whitelist of non-fatal Bybit status codes to retry on. 
if retry_codes is None: self.retry_codes = {10002, 10006, 30034, 30035, 130035, 130150} else: self.retry_codes = retry_codes # Set whitelist of non-fatal Bybit status codes to ignore. if ignore_codes is None: self.ignore_codes = set() else: self.ignore_codes = ignore_codes # Initialize requests session. self.client = requests.Session() self.client.headers.update( { 'User-Agent': 'pybit-' + VERSION, 'Content-Type': 'application/json', 'Accept': 'application/json', } ) # Add referral ID to header. if referral_id: self.client.headers.update({'Referer': referral_id}) def _exit(self): """Closes the request session.""" self.client.close() self.logger.debug('HTTP session closed.') def orderbook(self, **kwargs): """ Get the orderbook. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-orderbook. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/public/orderBook/L2', query=kwargs ) def query_kline(self, **kwargs): """ Get kline. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-querykline. :returns: Request results as dictionary. """ # Replace query param 'from_time' since 'from' keyword is reserved. # Temporary workaround until Bybit updates official request params if 'from_time' in kwargs: kwargs['from'] = kwargs.pop('from_time') if kwargs.get('symbol', '').endswith('USDT'): suffix = '/public/linear/kline' else: suffix = '/v2/public/kline/list' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs ) def latest_information_for_symbol(self, **kwargs): """ Get the latest information for symbol. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-latestsymbolinfo. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/public/tickers', query=kwargs ) def public_trading_records(self, **kwargs): """ Get recent trades. You can find a complete history of trades on Bybit at https://public.bybit.com/. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-latestsymbolinfo. :returns: Request results as dictionary. """ # Replace query param 'from_id' since 'from' keyword is reserved. # Temporary workaround until Bybit updates official request params if 'from_id' in kwargs: kwargs['from'] = kwargs.pop('from_id') if kwargs.get('symbol', '').endswith('USDT'): suffix = '/public/linear/recent-trading-records' else: suffix = '/v2/public/trading-records' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs ) def query_symbol(self): """ Get symbol info. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/public/symbols' ) def liquidated_orders(self, **kwargs): """ Retrieve the liquidated orders. The query range is the last seven days of data. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-query_liqrecords. :returns: Request results as dictionary. """ # Replace query param 'from_id' since 'from' keyword is reserved. # Temporary workaround until Bybit updates official request params if 'from_id' in kwargs: kwargs['from'] = kwargs.pop('from_id') return self._submit_request( method='GET', path=self.endpoint + '/v2/public/liq-records', query=kwargs ) def query_mark_price_kline(self, **kwargs): """ Query mark price kline (like query_kline but for mark price). :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-markpricekline. :returns: Request results as dictionary. 
""" # Replace query param 'from_time' since 'from' keyword is reserved. # Temporary workaround until Bybit updates official request params if 'from_time' in kwargs: kwargs['from'] = kwargs.pop('from_time') if kwargs.get('symbol', '').endswith('USDT'): suffix = '/public/linear/mark-price-kline' else: suffix = '/v2/public/mark-price-kline' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs ) def open_interest(self, **kwargs): """ Gets the total amount of unsettled contracts. In other words, the total number of contracts held in open positions. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-marketopeninterest. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/public/open-interest', query=kwargs ) def latest_big_deal(self, **kwargs): """ Obtain filled orders worth more than 500,000 USD within the last 24h. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-marketbigdeal. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/public/big-deal', query=kwargs ) def long_short_ratio(self, **kwargs): """ Gets the Bybit long-short ratio. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-marketaccountratio. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/public/account-ratio', query=kwargs ) def place_active_order(self, **kwargs): """ Places an active order. For more information, see https://bybit-exchange.github.io/docs/inverse/#t-activeorders. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-activeorders. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/order/create' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/order/create' else: suffix = '/v2/private/order/create' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def place_active_order_bulk(self, orders: list, max_in_parallel=10): """ Places multiple active orders in bulk using multithreading. For more information on place_active_order, see https://bybit-exchange.github.io/docs/inverse/#t-activeorders. :param list orders: A list of orders and their parameters. :param max_in_parallel: The number of requests to be sent in parallel. Note that you are limited to 50 requests per second. :returns: Future request result dictionaries as a list. """ with ThreadPoolExecutor(max_workers=max_in_parallel) as executor: executions = [ executor.submit( self.place_active_order, **order ) for order in orders ] executor.shutdown() return [execution.result() for execution in executions] def get_active_order(self, **kwargs): """ Gets an active order. For more information, see https://bybit-exchange.github.io/docs/inverse/#t-getactive. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-getactive. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/order/list' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/v2/private/order/list' else: suffix = '/futures/private/order/list' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def cancel_active_order(self, **kwargs): """ Cancels an active order. For more information, see https://bybit-exchange.github.io/docs/inverse/#t-cancelactive. 
:param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-cancelactive. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/order/cancel' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/order/cancel' else: suffix = '/v2/private/order/cancel' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def cancel_active_order_bulk(self, orders: list, max_in_parallel=10): """ Cancels multiple active orders in bulk using multithreading. For more information on cancel_active_order, see https://bybit-exchange.github.io/docs/inverse/#t-activeorders. :param list orders: A list of orders and their parameters. :param max_in_parallel: The number of requests to be sent in parallel. Note that you are limited to 50 requests per second. :returns: Future request result dictionaries as a list. """ with ThreadPoolExecutor(max_workers=max_in_parallel) as executor: executions = [ executor.submit( self.cancel_active_order, **order ) for order in orders ] executor.shutdown() return [execution.result() for execution in executions] def cancel_all_active_orders(self, **kwargs): """ Cancel all active orders that are unfilled or partially filled. Fully filled orders cannot be cancelled. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-cancelallactive. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/order/cancel-all' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/order/cancelAll' else: suffix = '/v2/private/order/cancelAll' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def replace_active_order(self, **kwargs): """ Replace order can modify/amend your active orders. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-replaceactive. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/order/replace' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/order/replace' else: suffix = '/v2/private/order/replace' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def replace_active_order_bulk(self, orders: list, max_in_parallel=10): """ Replaces multiple active orders in bulk using multithreading. For more information on replace_active_order, see https://bybit-exchange.github.io/docs/inverse/#t-replaceactive. :param list orders: A list of orders and their parameters. :param max_in_parallel: The number of requests to be sent in parallel. Note that you are limited to 50 requests per second. :returns: Future request result dictionaries as a list. """ with ThreadPoolExecutor(max_workers=max_in_parallel) as executor: executions = [ executor.submit( self.replace_active_order, **order ) for order in orders ] executor.shutdown() return [execution.result() for execution in executions] def query_active_order(self, **kwargs): """ Query real-time active order information. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-queryactive. :returns: Request results as dictionary. 
""" if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/order/search' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/order' else: suffix = '/v2/private/order' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def place_conditional_order(self, **kwargs): """ Places a conditional order. For more information, see https://bybit-exchange.github.io/docs/inverse/#t-placecond. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-placecond. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/stop-order/create' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/stop-order/create' else: suffix = '/v2/private/stop-order/create' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def place_conditional_order_bulk(self, orders: list, max_in_parallel=10): """ Places multiple conditional orders in bulk using multithreading. For more information on place_active_order, see https://bybit-exchange.github.io/docs/inverse/#t-placecond. :param orders: A list of orders and their parameters. :param max_in_parallel: The number of requests to be sent in parallel. Note that you are limited to 50 requests per second. :returns: Future request result dictionaries as a list. """ with ThreadPoolExecutor(max_workers=max_in_parallel) as executor: executions = [ executor.submit( self.place_conditional_order, **order ) for order in orders ] executor.shutdown() return [execution.result() for execution in executions] def get_conditional_order(self, **kwargs): """ Gets a conditional order. For more information, see https://bybit-exchange.github.io/docs/inverse/#t-getcond. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-getcond. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/stop-order/list' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/stop-order/list' else: suffix = '/v2/private/stop-order/list' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def cancel_conditional_order(self, **kwargs): """ Cancels a conditional order. For more information, see https://bybit-exchange.github.io/docs/inverse/#t-cancelcond. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-cancelcond. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/stop-order/cancel' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/stop-order/cancel' else: suffix = '/v2/private/stop-order/cancel' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def cancel_conditional_order_bulk(self, orders: list, max_in_parallel=10): """ Cancels multiple conditional orders in bulk using multithreading. For more information on cancel_active_order, see https://bybit-exchange.github.io/docs/inverse/#t-cancelcond. :param list orders: A list of orders and their parameters. :param max_in_parallel: The number of requests to be sent in parallel. Note that you are limited to 50 requests per second. :returns: Future request result dictionaries as a list. 
""" with ThreadPoolExecutor(max_workers=max_in_parallel) as executor: executions = [ executor.submit( self.cancel_conditional_order, **order ) for order in orders ] executor.shutdown() return [execution.result() for execution in executions] def cancel_all_conditional_orders(self, **kwargs): """ Cancel all conditional orders that are unfilled or partially filled. Fully filled orders cannot be cancelled. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-cancelallcond. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/stop-order/cancel-all' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/stop-order/cancelAll' else: suffix = '/v2/private/stop-order/cancelAll' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def replace_conditional_order(self, **kwargs): """ Replace conditional order can modify/amend your conditional orders. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-replacecond. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/stop-order/replace' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/stop-order/replace' else: suffix = '/v2/private/stop-order/replace' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def replace_conditional_order_bulk(self, orders: list, max_in_parallel=10): """ Replaces multiple conditional orders in bulk using multithreading. For more information on replace_active_order, see https://bybit-exchange.github.io/docs/inverse/#t-replacecond. :param list orders: A list of orders and their parameters. :param max_in_parallel: The number of requests to be sent in parallel. Note that you are limited to 50 requests per second. :returns: Future request result dictionaries as a list. """ with ThreadPoolExecutor(max_workers=max_in_parallel) as executor: executions = [ executor.submit( self.replace_conditional_order, **order ) for order in orders ] executor.shutdown() return [execution.result() for execution in executions] def query_conditional_order(self, **kwargs): """ Query real-time conditional order information. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-querycond. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/stop-order/search' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/stop-order' else: suffix = '/v2/private/stop-order' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def my_position(self, **kwargs): """ Get my position list. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-myposition. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/position/list' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/position/list' else: suffix = '/v2/private/position/list' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def set_auto_add_margin(self, **kwargs): """ For linear markets only. Set auto add margin, or Auto-Margin Replenishment. :param kwargs: See https://bybit-exchange.github.io/docs/linear/#t-setautoaddmargin. :returns: Request results as dictionary. 
""" return self._submit_request( method='POST', path=self.endpoint + '/private/linear/position/set-auto-add-margin', query=kwargs, auth=True ) def set_leverage(self, **kwargs): """ Change user leverage. :param kwargs: See https://bybit-exchange.github.io/docs/linear/#t-setleverage. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/position/set-leverage' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/position/leverage/save' else: suffix = '/v2/private/position/leverage/save' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def cross_isolated_margin_switch(self, **kwargs): """ For linear markets only. Switch Cross/Isolated; must be leverage value when switching from Cross to Isolated. :param kwargs: See https://bybit-exchange.github.io/docs/linear/#t-marginswitch. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/position/switch-isolated' else: suffix = '/futures/private/position/switch-mode' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def position_mode_switch(self, **kwargs): """ For futures markets only. Switch Cross/Isolated; must set leverage value when switching from Cross to Isolated. :param kwargs: See https://bybit-exchange.github.io/docs/inverse_futures/#t-marginswitch. :returns: Request results as dictionary. """ return self._submit_request( method='POST', path=self.endpoint + '/futures/private/position/switch-mode', query=kwargs, auth=True ) def change_margin(self, **kwargs): """ Update margin. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-changemargin. :returns: Request results as dictionary. """ if kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/position/change-position-margin' else: suffix = '/v2/private/position/change-position-margin' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def set_trading_stop(self, **kwargs): """ Set take profit, stop loss, and trailing stop for your open position. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-tradingstop. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/position/trading-stop' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/position/trading-stop' else: suffix = '/v2/private/position/trading-stop' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def add_reduce_margin(self, **kwargs): """ For linear markets only. Add margin. :param kwargs: See https://bybit-exchange.github.io/docs/linear/#t-addmargin. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/private/linear/position/add-margin', query=kwargs, auth=True ) def user_leverage(self, **kwargs): """ ABANDONED! Please use my_position instead. Fetches user leverage by fetching user position. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-getleverage. :returns: Request results as dictionary. """ self.logger.warning('This endpoint is deprecated and will be removed. Use my_position()') return self._submit_request( method='GET', path=self.endpoint + '/v2/private/position/list', query=kwargs, auth=True ) def change_user_leverage(self, **kwargs): """ Change user leverage. 
:param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-changeleverage. :returns: Request results as dictionary. """ self.logger.warning('This endpoint is deprecated and will be removed. Use set_leverage()') return self._submit_request( method='POST', path=self.endpoint + '/user/leverage/save', query=kwargs, auth=True ) def user_trade_records(self, **kwargs): """ Get user's trading records. The results are ordered in ascending order (the first item is the oldest). :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-usertraderecords. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/trade/execution/list' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/execution/list' else: suffix = '/v2/private/execution/list' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def closed_profit_and_loss(self, **kwargs): """ Get user's closed profit and loss records. The results are ordered in descending order (the first item is the latest). :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-closedprofitandloss. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/trade/closed-pnl/list' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/trade/closed-pnl/list' else: suffix = '/v2/private/trade/closed-pnl/list' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def get_risk_limit(self, is_linear=False): """ Get risk limit. :param is_linear: True for linear, False for inverse. Defaults to False. :returns: Request results as dictionary. """ if is_linear: suffix = '/public/linear/risk-limit' else: suffix = '/open-api/wallet/risk-limit/list' return self._submit_request( method='GET', path=self.endpoint + suffix, auth=True ) def set_risk_limit(self, **kwargs): """ Set risk limit. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-setrisklimit. :returns: Request results as dictionary. """ return self._submit_request( method='POST', path=self.endpoint + '/open-api/wallet/risk-limit', query=kwargs, auth=True ) def get_the_last_funding_rate(self, **kwargs): """ The funding rate is generated every 8 hours at 00:00 UTC, 08:00 UTC and 16:00 UTC. For example, if a request is sent at 12:00 UTC, the funding rate generated earlier that day at 08:00 UTC will be sent. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-fundingrate. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/public/linear/funding/prev-funding-rate' else: suffix = '/v2/private/funding/prev-funding-rate' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs ) def my_last_funding_fee(self, **kwargs): """ Funding settlement occurs every 8 hours at 00:00 UTC, 08:00 UTC and 16:00 UTC. The current interval's fund fee settlement is based on the previous interval's fund rate. For example, at 16:00, the settlement is based on the fund rate generated at 8:00. The fund rate generated at 16:00 will be used at 0:00 the next day. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-mylastfundingfee. :returns: Request results as dictionary. 
""" if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/funding/prev-funding' else: suffix = '/v2/private/funding/prev-funding' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def predicted_funding_rate(self, **kwargs): """ Get predicted funding rate and my funding fee. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-predictedfunding. :returns: Request results as dictionary. """ if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/funding/predicted-funding' else: suffix = '/v2/private/funding/predicted-funding' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def api_key_info(self): """ Get user's API key info. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/private/account/api-key', auth=True ) def lcp_info(self, **kwargs): """ Get user's LCP (data refreshes once an hour). Only supports inverse perpetual at present. See https://bybit-exchange.github.io/docs/inverse/#t-liquidity to learn more. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-lcp. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/private/account/lcp', query=kwargs, auth=True ) def get_wallet_balance(self, **kwargs): """ Get wallet balance info. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-balance. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/private/wallet/balance', query=kwargs, auth=True ) def wallet_fund_records(self, **kwargs): """ Get wallet fund records. This endpoint also shows exchanges from the Asset Exchange, where the types for the exchange are ExchangeOrderWithdraw and ExchangeOrderDeposit. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-walletrecords. :returns: Request results as dictionary. """ # Replace query param 'from_id' since 'from' keyword is reserved. # Temporary workaround until Bybit updates official request params if 'from_id' in kwargs: kwargs['from'] = kwargs.pop('from_id') return self._submit_request( method='GET', path=self.endpoint + '/v2/private/wallet/fund/records', query=kwargs, auth=True ) def withdraw_records(self, **kwargs): """ Get withdrawal records. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-withdrawrecords. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/private/wallet/withdraw/list', query=kwargs, auth=True ) def asset_exchange_records(self, **kwargs): """ Get asset exchange records. :param kwargs: See https://bybit-exchange.github.io/docs/inverse/#t-assetexchangerecords. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/private/exchange-order/list', query=kwargs, auth=True ) def server_time(self): """ Get Bybit server time. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/public/time' ) def announcement(self): """ Get Bybit OpenAPI announcements in the last 30 days by reverse order. :returns: Request results as dictionary. """ return self._submit_request( method='GET', path=self.endpoint + '/v2/public/announcement' ) ''' Additional Methods These methods use two or more requests to perform a specific function and are exclusive to pybit. 
''' def close_position(self, symbol): """ Closes your open position. Makes two requests (position, order). Parameters ------------------------ symbol : str Required parameter. The symbol of the market as a string, e.g. 'BTCUSD'. """ # First we fetch the user's position. try: r = self.my_position(symbol=symbol)['result'] # If there is no returned position, we want to handle that. except KeyError: return self.logger.error('No position detected.') # Next we generate a list of market orders orders = [ { 'symbol': symbol, 'order_type': 'Market', 'side': 'Buy' if p['side'] == 'Sell' else 'Sell', 'qty': p['size'], 'time_in_force': 'ImmediateOrCancel', 'reduce_only': True, 'close_on_trigger': True } for p in (r if isinstance(r, list) else [r]) if p['size'] > 0 ] if len(orders) == 0: return self.logger.error('No position detected.') # Submit a market order against each open position for the same qty. return self.place_active_order_bulk(orders) ''' Internal methods; signature and request submission. For more information about the request signature, see https://bybit-exchange.github.io/docs/inverse/#t-authentication. ''' def _auth(self, method, params, recv_window): """ Generates authentication signature per Bybit API specifications. Notes ------------------- Since the POST method requires a JSONified dict, we need to ensure the signature uses lowercase booleans instead of Python's capitalized booleans. This is done in the bug fix below. """ api_key = self.api_key api_secret = self.api_secret if api_key is None or api_secret is None: raise PermissionError('Authenticated endpoints require keys.') # Append required parameters. params['api_key'] = api_key params['recv_window'] = recv_window params['timestamp'] = int(time.time() * 10 ** 3) # Sort dictionary alphabetically to create querystring. _val = '&'.join( [str(k) + '=' + str(v) for k, v in sorted(params.items()) if (k != 'sign') and (v is not None)] ) # Bug fix. Replaces all capitalized booleans with lowercase. if method == 'POST': _val = _val.replace('True', 'true').replace('False', 'false') # Return signature. return str(hmac.new( bytes(api_secret, 'utf-8'), bytes(_val, 'utf-8'), digestmod='sha256' ).hexdigest()) def _submit_request(self, method=None, path=None, query=None, auth=False): """ Submits the request to the API. Notes ------------------- We use the params argument for the GET method, and data argument for the POST method. Dicts passed to the data argument must be JSONified prior to submitting request. """ # Store original recv_window. recv_window = self.recv_window # Bug fix: change floating whole numbers to integers to prevent # auth signature errors. if query is not None: for i in query.keys(): if isinstance(query[i], float) and query[i] == int(query[i]): query[i] = int(query[i]) # Send request and return headers with body. Retry if failed. retries_attempted = self.max_retries req_params = None while True: retries_attempted -= 1 if retries_attempted < 0: raise FailedRequestError( request=f'{method} {path}: {req_params}', message='Bad Request. Retries exceeded maximum.', status_code=400, time=dt.utcnow().strftime("%H:%M:%S") ) retries_remaining = f'{retries_attempted} retries remain.' # Authenticate if we are using a private endpoint. if auth: # Prepare signature. signature = self._auth( method=method, params=query, recv_window=recv_window, ) # Sort the dictionary alphabetically. query = dict(sorted(query.items(), key=lambda x: x)) # Append the signature to the dictionary. 
query['sign'] = signature # Define parameters and log the request. if query is not None: req_params = {k: v for k, v in query.items() if v is not None} else: req_params = {} # Log the request. if self.log_requests: self.logger.debug(f'Request -> {method} {path}: {req_params}') # Prepare request; use 'params' for GET and 'data' for POST. if method == 'GET': r = self.client.prepare_request( requests.Request(method, path, params=req_params) ) else: r = self.client.prepare_request( requests.Request(method, path, data=json.dumps(req_params)) ) # Attempt the request. try: s = self.client.send(r, timeout=self.timeout) # If requests fires an error, retry. except ( requests.exceptions.ReadTimeout, requests.exceptions.SSLError, requests.exceptions.ConnectionError ) as e: if self.force_retry: self.logger.error(f'{e}. {retries_remaining}') time.sleep(self.retry_delay) continue else: raise e # Convert response to dictionary, or raise if requests error. try: s_json = s.json() # If we have trouble converting, handle the error and retry. except JSONDecodeError as e: if self.force_retry: self.logger.error(f'{e}. {retries_remaining}') time.sleep(self.retry_delay) continue else: raise FailedRequestError( request=f'{method} {path}: {req_params}', message='Conflict. Could not decode JSON.', status_code=409, time=dt.utcnow().strftime("%H:%M:%S") ) # If Bybit returns an error, raise. if s_json['ret_code']: # Generate error message. error_msg = ( f'{s_json["ret_msg"]} (ErrCode: {s_json["ret_code"]})' ) # Set default retry delay. err_delay = self.retry_delay # Retry non-fatal whitelisted error requests. if s_json['ret_code'] in self.retry_codes: # 10002, recv_window error; add 2.5 seconds and retry. if s_json['ret_code'] == 10002: error_msg += '. Added 2.5 seconds to recv_window' recv_window += 2500 # 10006, ratelimit error; wait until rate_limit_reset_ms # and retry. elif s_json['ret_code'] == 10006: self.logger.error( f'{error_msg}. Ratelimited on current request. ' f'Sleeping, then trying again. Request: {path}' ) # Calculate how long we need to wait. limit_reset = s_json['rate_limit_reset_ms'] / 1000 reset_str = time.strftime( '%X', time.localtime(limit_reset) ) err_delay = int(limit_reset) - int(time.time()) error_msg = ( f'Ratelimit will reset at {reset_str}. ' f'Sleeping for {err_delay} seconds' ) # Log the error. self.logger.error(f'{error_msg}. {retries_remaining}') time.sleep(err_delay) continue elif s_json['ret_code'] in self.ignore_codes: pass else: raise InvalidRequestError( request=f'{method} {path}: {req_params}', message=s_json["ret_msg"], status_code=s_json["ret_code"], time=dt.utcnow().strftime("%H:%M:%S") ) else: return s_json class WebSocket: """ Connector for Bybit's WebSocket API. """ def __init__(self, endpoint, api_key=None, api_secret=None, subscriptions=None, logging_level=logging.INFO, max_data_length=200, ping_interval=30, ping_timeout=10, restart_on_error=True, purge_on_fetch=True, trim_data=True): """ Initializes the websocket session. :param endpoint: Required parameter. The endpoint of the remote websocket. :param api_key: Your API key. Required for authenticated endpoints. Defaults to None. :param api_secret: Your API secret key. Required for authenticated endpoints. Defaults to None. :param subscriptions: A list of desired topics to subscribe to. See API documentation for more information. Defaults to an empty list, which will raise an error. :param logging_level: The logging level of the built-in logger. Defaults to logging.INFO. 
Options are CRITICAL (50), ERROR (40), WARNING (30), INFO (20), DEBUG (10), or NOTSET (0). :param max_data_length: The maximum number of rows for the stored dataset. A smaller number will prevent performance or memory issues. :param ping_interval: The number of seconds between each automated ping. :param ping_timeout: The number of seconds to wait for 'pong' before an Exception is raised. :param restart_on_error: Whether or not the connection should restart on error. :param purge_on_fetch: Whether or not stored data should be purged each fetch. For example, if the user subscribes to the 'trade' topic, and fetches, should the data show all trade history up to the maximum length or only get the data since the last fetch? :param trim_data: Decide whether the returning data should be trimmed to only provide the data value. :returns: WebSocket session. """ if not subscriptions: raise Exception('Subscription list cannot be empty!') # Require symbol on 'trade' topic. if 'trade' in subscriptions: raise Exception('\'trade\' requires a ticker, e.g. ' '\'trade.BTCUSD\'.') # Require currency on 'insurance' topic. if 'insurance' in subscriptions: raise Exception('\'insurance\' requires a currency, e.g. ' '\'insurance.BTC\'.') # Require timeframe and ticker on 'klineV2' topic. if 'klineV2' in subscriptions: raise Exception('\'klineV2\' requires a timeframe and ticker, e.g.' ' \'klineV2.5.BTCUSD\'.') # set websocket name for logging purposes self.wsName = 'Authenticated' if api_key else 'Non-Authenticated' # Setup logger. self.logger = logging.getLogger(__name__) if len(logging.root.handlers) == 0: # no handler on root logger set -> we add handler just for this logger to not mess with custom logic from outside handler = logging.StreamHandler() handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S' ) ) handler.setLevel(logging_level) self.logger.addHandler(handler) self.logger.debug(f'Initializing {self.wsName} WebSocket.') # Ensure authentication for private topics. if any(i in subscriptions for i in [ 'position', 'execution', 'order', 'stop_order', 'wallet' ]) and api_key is None: raise PermissionError('You must be authorized to use ' 'private topics!') # Set endpoint. self.endpoint = endpoint # Set API keys. self.api_key = api_key self.api_secret = api_secret # Set topic subscriptions for WebSocket. self.subscriptions = subscriptions self.max_length = max_data_length # Set ping settings. self.ping_interval = ping_interval self.ping_timeout = ping_timeout # Other optional data handling settings. self.handle_error = restart_on_error self.purge = purge_on_fetch self.trim = trim_data # Set initial state, initialize dictionary and connnect. self._reset() self._connect(self.endpoint) def fetch(self, topic): """ Fetches data from the subscribed topic. :param topic: Required parameter. The subscribed topic to poll. :returns: Filtered data as dict. """ # If topic isn't a string. if not isinstance(topic, str): self.logger.error('Topic argument must be a string.') return # If the topic given isn't in the initial subscribed list. if topic not in self.subscriptions: self.logger.error(f'You aren\'t subscribed to the {topic} topic.') return # Pop all trade or execution data on each poll. 
# dont pop order or stop_order data as we will lose valuable state if topic.startswith(( 'trade', 'execution' )) and not topic.startswith('orderBook'): data = self.data[topic].copy() if self.purge: self.data[topic] = [] return data else: try: return self.data[topic] except KeyError: return [] def ping(self): """ Pings the remote server to test the connection. The status of the connection can be monitored using ws.ping(). """ self.ws.send(json.dumps({'op': 'ping'})) def exit(self): """ Closes the websocket connection. """ self.ws.close() while self.ws.sock: continue self.exited = True def _auth(self): """ Authorize websocket connection. """ # Generate expires. expires = int((time.time() + 1) * 1000) # Generate signature. _val = f'GET/realtime{expires}' signature = str(hmac.new( bytes(self.api_secret, 'utf-8'), bytes(_val, 'utf-8'), digestmod='sha256' ).hexdigest()) # Authenticate with API. self.ws.send( json.dumps({ 'op': 'auth', 'args': [self.api_key, expires, signature] }) ) def _connect(self, url): """ Open websocket in a thread. """ self.ws = websocket.WebSocketApp( url=url, on_message=lambda ws, msg: self._on_message(msg), on_close=self._on_close(), on_open=self._on_open(), on_error=lambda ws, err: self._on_error(err) ) # Setup the thread running WebSocketApp. self.wst = threading.Thread(target=lambda: self.ws.run_forever( ping_interval=self.ping_interval, ping_timeout=self.ping_timeout )) # Configure as daemon; start. self.wst.daemon = True self.wst.start() # Attempt to connect for X seconds. retries = 10 while retries > 0 and (not self.ws.sock or not self.ws.sock.connected): retries -= 1 time.sleep(1) # If connection was not successful, raise error. if retries <= 0: self.exit() raise websocket.WebSocketTimeoutException('Connection failed.') # If given an api_key, authenticate. if self.api_key and self.api_secret: self._auth() # Check if subscriptions is a list. if isinstance(self.subscriptions, str): self.subscriptions = [self.subscriptions] # Subscribe to the requested topics. self.ws.send( json.dumps({ 'op': 'subscribe', 'args': self.subscriptions }) ) # Initialize the topics. for topic in self.subscriptions: if topic not in self.data: self.data[topic] = {} @staticmethod def _find_index(source, target, key): """ Find the index in source list of the targeted ID. """ return next(i for i, j in enumerate(source) if j[key] == target[key]) def _on_message(self, message): """ Parse incoming messages. Similar structure to the official WS connector. """ # Load dict of message. msg_json = json.loads(message) # If 'success' exists if 'success' in msg_json: if msg_json['success']: # If 'request' exists. if 'request' in msg_json: # If we get succesful auth, notify user. if msg_json['request']['op'] == 'auth': self.logger.debug('Authorization successful.') self.auth = True # If we get successful subscription, notify user. if msg_json['request']['op'] == 'subscribe': sub = msg_json['request']['args'] self.logger.debug(f'Subscription to {sub} successful.') else: response = msg_json['ret_msg'] if 'unknown topic' in response: self.logger.error('Couldn\'t subscribe to topic.' f' Error: {response}.') # If we get unsuccesful auth, notify user. elif msg_json['request']['op'] == 'auth': self.logger.debug('Authorization failed. Please check your ' 'API keys and restart.') elif 'topic' in msg_json: topic = msg_json['topic'] # If incoming 'orderbookL2' data. if 'orderBook' in topic: # Make updates according to delta response. if 'delta' in msg_json['type']: # Delete. 
for entry in msg_json['data']['delete']: index = self._find_index(self.data[topic], entry, 'id') self.data[topic].pop(index) # Update. for entry in msg_json['data']['update']: index = self._find_index(self.data[topic], entry, 'id') self.data[topic][index] = entry # Insert. for entry in msg_json['data']['insert']: self.data[topic].append(entry) # Record the initial snapshot. elif 'snapshot' in msg_json['type']: self.data[topic] = msg_json['data'] # For incoming 'order' and 'stop_order' data. elif any(i in topic for i in ['order', 'stop_order']): # record incoming data for i in msg_json['data']: try: # update existing entries # temporary workaround for field anomaly in stop_order data ord_id = topic + '_id' if i['symbol'].endswith('USDT') else 'order_id' index = self._find_index(self.data[topic], i, ord_id) self.data[topic][index] = i except StopIteration: # Keep appending or create new list if not already created. try: self.data[topic].append(i) except AttributeError: self.data[topic] = msg_json['data'] # For incoming 'trade' and 'execution' data. elif any(i in topic for i in ['trade', 'execution']): # Keep appending or create new list if not already created. try: for i in msg_json['data']: self.data[topic].append(i) except AttributeError: self.data[topic] = msg_json['data'] # If list is too long, pop the first entry. if len(self.data[topic]) > self.max_length: self.data[topic].pop(0) # If incoming 'insurance', 'klineV2', or 'wallet' data. elif any(i in topic for i in ['insurance', 'klineV2', 'wallet', 'candle']): # Record incoming data. self.data[topic] = msg_json['data'][0] if self.trim else msg_json # If incoming 'instrument_info' data. elif 'instrument_info' in topic: # Make updates according to delta response. if 'delta' in msg_json['type']: for i in msg_json['data']['update'][0]: self.data[topic][i] = msg_json['data']['update'][0][i] # Record the initial snapshot. elif 'snapshot' in msg_json['type']: self.data[topic] = msg_json['data'] if self.trim else msg_json # If incoming 'position' data. elif 'position' in topic: # Record incoming position data. for p in msg_json['data']: # linear (USDT) positions have Buy|Sell side and # updates contain all USDT positions. # For linear tickers... if p['symbol'].endswith('USDT'): try: self.data[topic][p['symbol']][p['side']] = p # if side key hasn't been created yet... except KeyError: self.data[topic][p['symbol']] = {p['side']: p} # For non-linear tickers... else: self.data[topic][p['symbol']] = p def _on_error(self, error): """ Exit on errors and raise exception, or attempt reconnect. """ if not self.exited: self.logger.error(f'WebSocket {self.wsName} encountered error: {error}.') self.exit() # Reconnect. if self.handle_error: self._reset() self._connect(self.endpoint) def _on_open(self): """ Log WS open. """ self.logger.debug(f'WebSocket {self.wsName} opened.') def _on_close(self): """ Log WS close. """ self.logger.debug(f'WebSocket {self.wsName} closed.') def _reset(self): """ Set state booleans and initialize dictionary. """ self.exited = False self.auth = False self.data = {}
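For readers skimming the class definitions above, the following is a minimal usage sketch rather than an official recipe: it assumes the file is importable as the `pybit` module, that the REST endpoint is the default 'https://api.bybit.com' hard-coded in HTTP.__init__, and that 'wss://stream.bybit.com/realtime' is a valid WebSocket endpoint; the API keys, order parameters and subscription topic are placeholders.

import time
from pybit import HTTP, WebSocket  # assumption: the module above is exposed as `pybit`

# REST session; the keys are placeholders and are only needed for auth=True endpoints.
session = HTTP(
    endpoint='https://api.bybit.com',
    api_key='YOUR_API_KEY',
    api_secret='YOUR_API_SECRET',
    force_retry=True,
)

# Public endpoint (no signature required).
print(session.latest_information_for_symbol(symbol='BTCUSD'))

# Private endpoint; 'BTCUSD' routes to /v2/private/order/create per the suffix logic above.
print(session.place_active_order(
    symbol='BTCUSD',
    side='Buy',
    order_type='Market',
    qty=1,
    time_in_force='ImmediateOrCancel',
))

# WebSocket stream; subscriptions must be a non-empty list of topics.
ws = WebSocket(
    endpoint='wss://stream.bybit.com/realtime',        # assumed inverse-perpetual endpoint
    subscriptions=['instrument_info.100ms.BTCUSD'],
)
for _ in range(10):
    data = ws.fetch('instrument_info.100ms.BTCUSD')
    if data:
        print(data)
    time.sleep(1)
ws.exit()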
33.691429
125
0.556926
import time import hmac import json import logging import threading import requests import websocket from datetime import datetime as dt from concurrent.futures import ThreadPoolExecutor from .exceptions import FailedRequestError, InvalidRequestError try: from simplejson.errors import JSONDecodeError except ImportError: from json.decoder import JSONDecodeError VERSION = '1.1.18' class HTTP: def __init__(self, endpoint=None, api_key=None, api_secret=None, logging_level=logging.INFO, log_requests=False, request_timeout=10, recv_window=5000, force_retry=False, retry_codes=None, ignore_codes=None, max_retries=3, retry_delay=3, referral_id=None): if endpoint is None: self.endpoint = 'https://api.bybit.com' else: self.endpoint = endpoint self.logger = logging.getLogger(__name__) if len(logging.root.handlers) == 0: handler = logging.StreamHandler() handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S' ) ) handler.setLevel(logging_level) self.logger.addHandler(handler) self.logger.debug('Initializing HTTP session.') self.log_requests = log_requests self.api_key = api_key self.api_secret = api_secret self.timeout = request_timeout self.recv_window = recv_window self.force_retry = force_retry self.max_retries = max_retries self.retry_delay = retry_delay if retry_codes is None: self.retry_codes = {10002, 10006, 30034, 30035, 130035, 130150} else: self.retry_codes = retry_codes if ignore_codes is None: self.ignore_codes = set() else: self.ignore_codes = ignore_codes self.client = requests.Session() self.client.headers.update( { 'User-Agent': 'pybit-' + VERSION, 'Content-Type': 'application/json', 'Accept': 'application/json', } ) if referral_id: self.client.headers.update({'Referer': referral_id}) def _exit(self): self.client.close() self.logger.debug('HTTP session closed.') def orderbook(self, **kwargs): return self._submit_request( method='GET', path=self.endpoint + '/v2/public/orderBook/L2', query=kwargs ) def query_kline(self, **kwargs): if 'from_time' in kwargs: kwargs['from'] = kwargs.pop('from_time') if kwargs.get('symbol', '').endswith('USDT'): suffix = '/public/linear/kline' else: suffix = '/v2/public/kline/list' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs ) def latest_information_for_symbol(self, **kwargs): return self._submit_request( method='GET', path=self.endpoint + '/v2/public/tickers', query=kwargs ) def public_trading_records(self, **kwargs): if 'from_id' in kwargs: kwargs['from'] = kwargs.pop('from_id') if kwargs.get('symbol', '').endswith('USDT'): suffix = '/public/linear/recent-trading-records' else: suffix = '/v2/public/trading-records' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs ) def query_symbol(self): return self._submit_request( method='GET', path=self.endpoint + '/v2/public/symbols' ) def liquidated_orders(self, **kwargs): if 'from_id' in kwargs: kwargs['from'] = kwargs.pop('from_id') return self._submit_request( method='GET', path=self.endpoint + '/v2/public/liq-records', query=kwargs ) def query_mark_price_kline(self, **kwargs): if 'from_time' in kwargs: kwargs['from'] = kwargs.pop('from_time') if kwargs.get('symbol', '').endswith('USDT'): suffix = '/public/linear/mark-price-kline' else: suffix = '/v2/public/mark-price-kline' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs ) def open_interest(self, **kwargs): return self._submit_request( method='GET', path=self.endpoint + 
'/v2/public/open-interest', query=kwargs ) def latest_big_deal(self, **kwargs): return self._submit_request( method='GET', path=self.endpoint + '/v2/public/big-deal', query=kwargs ) def long_short_ratio(self, **kwargs): return self._submit_request( method='GET', path=self.endpoint + '/v2/public/account-ratio', query=kwargs ) def place_active_order(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/order/create' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/order/create' else: suffix = '/v2/private/order/create' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def place_active_order_bulk(self, orders: list, max_in_parallel=10): with ThreadPoolExecutor(max_workers=max_in_parallel) as executor: executions = [ executor.submit( self.place_active_order, **order ) for order in orders ] executor.shutdown() return [execution.result() for execution in executions] def get_active_order(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/order/list' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/v2/private/order/list' else: suffix = '/futures/private/order/list' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def cancel_active_order(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/order/cancel' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/order/cancel' else: suffix = '/v2/private/order/cancel' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def cancel_active_order_bulk(self, orders: list, max_in_parallel=10): with ThreadPoolExecutor(max_workers=max_in_parallel) as executor: executions = [ executor.submit( self.cancel_active_order, **order ) for order in orders ] executor.shutdown() return [execution.result() for execution in executions] def cancel_all_active_orders(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/order/cancel-all' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/order/cancelAll' else: suffix = '/v2/private/order/cancelAll' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def replace_active_order(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/order/replace' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/order/replace' else: suffix = '/v2/private/order/replace' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def replace_active_order_bulk(self, orders: list, max_in_parallel=10): with ThreadPoolExecutor(max_workers=max_in_parallel) as executor: executions = [ executor.submit( self.replace_active_order, **order ) for order in orders ] executor.shutdown() return [execution.result() for execution in executions] def query_active_order(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/order/search' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/order' else: suffix = '/v2/private/order' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def place_conditional_order(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/stop-order/create' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = 
'/futures/private/stop-order/create' else: suffix = '/v2/private/stop-order/create' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def place_conditional_order_bulk(self, orders: list, max_in_parallel=10): with ThreadPoolExecutor(max_workers=max_in_parallel) as executor: executions = [ executor.submit( self.place_conditional_order, **order ) for order in orders ] executor.shutdown() return [execution.result() for execution in executions] def get_conditional_order(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/stop-order/list' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/stop-order/list' else: suffix = '/v2/private/stop-order/list' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def cancel_conditional_order(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/stop-order/cancel' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/stop-order/cancel' else: suffix = '/v2/private/stop-order/cancel' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def cancel_conditional_order_bulk(self, orders: list, max_in_parallel=10): with ThreadPoolExecutor(max_workers=max_in_parallel) as executor: executions = [ executor.submit( self.cancel_conditional_order, **order ) for order in orders ] executor.shutdown() return [execution.result() for execution in executions] def cancel_all_conditional_orders(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/stop-order/cancel-all' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/stop-order/cancelAll' else: suffix = '/v2/private/stop-order/cancelAll' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def replace_conditional_order(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/stop-order/replace' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/stop-order/replace' else: suffix = '/v2/private/stop-order/replace' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def replace_conditional_order_bulk(self, orders: list, max_in_parallel=10): with ThreadPoolExecutor(max_workers=max_in_parallel) as executor: executions = [ executor.submit( self.replace_conditional_order, **order ) for order in orders ] executor.shutdown() return [execution.result() for execution in executions] def query_conditional_order(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/stop-order/search' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/stop-order' else: suffix = '/v2/private/stop-order' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def my_position(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/position/list' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/position/list' else: suffix = '/v2/private/position/list' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def set_auto_add_margin(self, **kwargs): return self._submit_request( method='POST', path=self.endpoint + '/private/linear/position/set-auto-add-margin', query=kwargs, auth=True ) def set_leverage(self, **kwargs): if 
kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/position/set-leverage' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/position/leverage/save' else: suffix = '/v2/private/position/leverage/save' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def cross_isolated_margin_switch(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/position/switch-isolated' else: suffix = '/futures/private/position/switch-mode' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def position_mode_switch(self, **kwargs): return self._submit_request( method='POST', path=self.endpoint + '/futures/private/position/switch-mode', query=kwargs, auth=True ) def change_margin(self, **kwargs): if kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/position/change-position-margin' else: suffix = '/v2/private/position/change-position-margin' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def set_trading_stop(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/position/trading-stop' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/position/trading-stop' else: suffix = '/v2/private/position/trading-stop' return self._submit_request( method='POST', path=self.endpoint + suffix, query=kwargs, auth=True ) def add_reduce_margin(self, **kwargs): return self._submit_request( method='GET', path=self.endpoint + '/private/linear/position/add-margin', query=kwargs, auth=True ) def user_leverage(self, **kwargs): self.logger.warning('This endpoint is deprecated and will be removed. Use my_position()') return self._submit_request( method='GET', path=self.endpoint + '/v2/private/position/list', query=kwargs, auth=True ) def change_user_leverage(self, **kwargs): self.logger.warning('This endpoint is deprecated and will be removed. 
Use set_leverage()') return self._submit_request( method='POST', path=self.endpoint + '/user/leverage/save', query=kwargs, auth=True ) def user_trade_records(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/trade/execution/list' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/execution/list' else: suffix = '/v2/private/execution/list' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def closed_profit_and_loss(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/trade/closed-pnl/list' elif kwargs.get('symbol', '')[-2:].isdigit(): suffix = '/futures/private/trade/closed-pnl/list' else: suffix = '/v2/private/trade/closed-pnl/list' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def get_risk_limit(self, is_linear=False): if is_linear: suffix = '/public/linear/risk-limit' else: suffix = '/open-api/wallet/risk-limit/list' return self._submit_request( method='GET', path=self.endpoint + suffix, auth=True ) def set_risk_limit(self, **kwargs): return self._submit_request( method='POST', path=self.endpoint + '/open-api/wallet/risk-limit', query=kwargs, auth=True ) def get_the_last_funding_rate(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/public/linear/funding/prev-funding-rate' else: suffix = '/v2/private/funding/prev-funding-rate' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs ) def my_last_funding_fee(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/funding/prev-funding' else: suffix = '/v2/private/funding/prev-funding' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def predicted_funding_rate(self, **kwargs): if kwargs.get('symbol', '').endswith('USDT'): suffix = '/private/linear/funding/predicted-funding' else: suffix = '/v2/private/funding/predicted-funding' return self._submit_request( method='GET', path=self.endpoint + suffix, query=kwargs, auth=True ) def api_key_info(self): return self._submit_request( method='GET', path=self.endpoint + '/v2/private/account/api-key', auth=True ) def lcp_info(self, **kwargs): return self._submit_request( method='GET', path=self.endpoint + '/v2/private/account/lcp', query=kwargs, auth=True ) def get_wallet_balance(self, **kwargs): return self._submit_request( method='GET', path=self.endpoint + '/v2/private/wallet/balance', query=kwargs, auth=True ) def wallet_fund_records(self, **kwargs): if 'from_id' in kwargs: kwargs['from'] = kwargs.pop('from_id') return self._submit_request( method='GET', path=self.endpoint + '/v2/private/wallet/fund/records', query=kwargs, auth=True ) def withdraw_records(self, **kwargs): return self._submit_request( method='GET', path=self.endpoint + '/v2/private/wallet/withdraw/list', query=kwargs, auth=True ) def asset_exchange_records(self, **kwargs): return self._submit_request( method='GET', path=self.endpoint + '/v2/private/exchange-order/list', query=kwargs, auth=True ) def server_time(self): return self._submit_request( method='GET', path=self.endpoint + '/v2/public/time' ) def announcement(self): return self._submit_request( method='GET', path=self.endpoint + '/v2/public/announcement' ) def close_position(self, symbol): try: r = self.my_position(symbol=symbol)['result'] # If there is no returned position, we want to handle that. 
except KeyError: return self.logger.error('No position detected.') # Next we generate a list of market orders orders = [ { 'symbol': symbol, 'order_type': 'Market', 'side': 'Buy' if p['side'] == 'Sell' else 'Sell', 'qty': p['size'], 'time_in_force': 'ImmediateOrCancel', 'reduce_only': True, 'close_on_trigger': True } for p in (r if isinstance(r, list) else [r]) if p['size'] > 0 ] if len(orders) == 0: return self.logger.error('No position detected.') # Submit a market order against each open position for the same qty. return self.place_active_order_bulk(orders) def _auth(self, method, params, recv_window): api_key = self.api_key api_secret = self.api_secret if api_key is None or api_secret is None: raise PermissionError('Authenticated endpoints require keys.') # Append required parameters. params['api_key'] = api_key params['recv_window'] = recv_window params['timestamp'] = int(time.time() * 10 ** 3) # Sort dictionary alphabetically to create querystring. _val = '&'.join( [str(k) + '=' + str(v) for k, v in sorted(params.items()) if (k != 'sign') and (v is not None)] ) # Bug fix. Replaces all capitalized booleans with lowercase. if method == 'POST': _val = _val.replace('True', 'true').replace('False', 'false') # Return signature. return str(hmac.new( bytes(api_secret, 'utf-8'), bytes(_val, 'utf-8'), digestmod='sha256' ).hexdigest()) def _submit_request(self, method=None, path=None, query=None, auth=False): # Store original recv_window. recv_window = self.recv_window # Bug fix: change floating whole numbers to integers to prevent # auth signature errors. if query is not None: for i in query.keys(): if isinstance(query[i], float) and query[i] == int(query[i]): query[i] = int(query[i]) # Send request and return headers with body. Retry if failed. retries_attempted = self.max_retries req_params = None while True: retries_attempted -= 1 if retries_attempted < 0: raise FailedRequestError( request=f'{method} {path}: {req_params}', message='Bad Request. Retries exceeded maximum.', status_code=400, time=dt.utcnow().strftime("%H:%M:%S") ) retries_remaining = f'{retries_attempted} retries remain.' # Authenticate if we are using a private endpoint. if auth: # Prepare signature. signature = self._auth( method=method, params=query, recv_window=recv_window, ) # Sort the dictionary alphabetically. query = dict(sorted(query.items(), key=lambda x: x)) # Append the signature to the dictionary. query['sign'] = signature # Define parameters and log the request. if query is not None: req_params = {k: v for k, v in query.items() if v is not None} else: req_params = {} # Log the request. if self.log_requests: self.logger.debug(f'Request -> {method} {path}: {req_params}') # Prepare request; use 'params' for GET and 'data' for POST. if method == 'GET': r = self.client.prepare_request( requests.Request(method, path, params=req_params) ) else: r = self.client.prepare_request( requests.Request(method, path, data=json.dumps(req_params)) ) # Attempt the request. try: s = self.client.send(r, timeout=self.timeout) # If requests fires an error, retry. except ( requests.exceptions.ReadTimeout, requests.exceptions.SSLError, requests.exceptions.ConnectionError ) as e: if self.force_retry: self.logger.error(f'{e}. {retries_remaining}') time.sleep(self.retry_delay) continue else: raise e # Convert response to dictionary, or raise if requests error. try: s_json = s.json() # If we have trouble converting, handle the error and retry. except JSONDecodeError as e: if self.force_retry: self.logger.error(f'{e}. 
{retries_remaining}') time.sleep(self.retry_delay) continue else: raise FailedRequestError( request=f'{method} {path}: {req_params}', message='Conflict. Could not decode JSON.', status_code=409, time=dt.utcnow().strftime("%H:%M:%S") ) # If Bybit returns an error, raise. if s_json['ret_code']: # Generate error message. error_msg = ( f'{s_json["ret_msg"]} (ErrCode: {s_json["ret_code"]})' ) # Set default retry delay. err_delay = self.retry_delay # Retry non-fatal whitelisted error requests. if s_json['ret_code'] in self.retry_codes: # 10002, recv_window error; add 2.5 seconds and retry. if s_json['ret_code'] == 10002: error_msg += '. Added 2.5 seconds to recv_window' recv_window += 2500 # 10006, ratelimit error; wait until rate_limit_reset_ms # and retry. elif s_json['ret_code'] == 10006: self.logger.error( f'{error_msg}. Ratelimited on current request. ' f'Sleeping, then trying again. Request: {path}' ) # Calculate how long we need to wait. limit_reset = s_json['rate_limit_reset_ms'] / 1000 reset_str = time.strftime( '%X', time.localtime(limit_reset) ) err_delay = int(limit_reset) - int(time.time()) error_msg = ( f'Ratelimit will reset at {reset_str}. ' f'Sleeping for {err_delay} seconds' ) # Log the error. self.logger.error(f'{error_msg}. {retries_remaining}') time.sleep(err_delay) continue elif s_json['ret_code'] in self.ignore_codes: pass else: raise InvalidRequestError( request=f'{method} {path}: {req_params}', message=s_json["ret_msg"], status_code=s_json["ret_code"], time=dt.utcnow().strftime("%H:%M:%S") ) else: return s_json class WebSocket: def __init__(self, endpoint, api_key=None, api_secret=None, subscriptions=None, logging_level=logging.INFO, max_data_length=200, ping_interval=30, ping_timeout=10, restart_on_error=True, purge_on_fetch=True, trim_data=True): if not subscriptions: raise Exception('Subscription list cannot be empty!') # Require symbol on 'trade' topic. if 'trade' in subscriptions: raise Exception('\'trade\' requires a ticker, e.g. ' '\'trade.BTCUSD\'.') # Require currency on 'insurance' topic. if 'insurance' in subscriptions: raise Exception('\'insurance\' requires a currency, e.g. ' '\'insurance.BTC\'.') # Require timeframe and ticker on 'klineV2' topic. if 'klineV2' in subscriptions: raise Exception('\'klineV2\' requires a timeframe and ticker, e.g.' ' \'klineV2.5.BTCUSD\'.') # set websocket name for logging purposes self.wsName = 'Authenticated' if api_key else 'Non-Authenticated' # Setup logger. self.logger = logging.getLogger(__name__) if len(logging.root.handlers) == 0: # no handler on root logger set -> we add handler just for this logger to not mess with custom logic from outside handler = logging.StreamHandler() handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S' ) ) handler.setLevel(logging_level) self.logger.addHandler(handler) self.logger.debug(f'Initializing {self.wsName} WebSocket.') # Ensure authentication for private topics. if any(i in subscriptions for i in [ 'position', 'execution', 'order', 'stop_order', 'wallet' ]) and api_key is None: raise PermissionError('You must be authorized to use ' 'private topics!') # Set endpoint. self.endpoint = endpoint # Set API keys. self.api_key = api_key self.api_secret = api_secret # Set topic subscriptions for WebSocket. self.subscriptions = subscriptions self.max_length = max_data_length # Set ping settings. self.ping_interval = ping_interval self.ping_timeout = ping_timeout # Other optional data handling settings. 
self.handle_error = restart_on_error self.purge = purge_on_fetch self.trim = trim_data # Set initial state, initialize dictionary and connnect. self._reset() self._connect(self.endpoint) def fetch(self, topic): # If topic isn't a string. if not isinstance(topic, str): self.logger.error('Topic argument must be a string.') return if topic not in self.subscriptions: self.logger.error(f'You aren\'t subscribed to the {topic} topic.') return if topic.startswith(( 'trade', 'execution' )) and not topic.startswith('orderBook'): data = self.data[topic].copy() if self.purge: self.data[topic] = [] return data else: try: return self.data[topic] except KeyError: return [] def ping(self): self.ws.send(json.dumps({'op': 'ping'})) def exit(self): self.ws.close() while self.ws.sock: continue self.exited = True def _auth(self): expires = int((time.time() + 1) * 1000) _val = f'GET/realtime{expires}' signature = str(hmac.new( bytes(self.api_secret, 'utf-8'), bytes(_val, 'utf-8'), digestmod='sha256' ).hexdigest()) self.ws.send( json.dumps({ 'op': 'auth', 'args': [self.api_key, expires, signature] }) ) def _connect(self, url): self.ws = websocket.WebSocketApp( url=url, on_message=lambda ws, msg: self._on_message(msg), on_close=self._on_close(), on_open=self._on_open(), on_error=lambda ws, err: self._on_error(err) ) self.wst = threading.Thread(target=lambda: self.ws.run_forever( ping_interval=self.ping_interval, ping_timeout=self.ping_timeout )) self.wst.daemon = True self.wst.start() retries = 10 while retries > 0 and (not self.ws.sock or not self.ws.sock.connected): retries -= 1 time.sleep(1) if retries <= 0: self.exit() raise websocket.WebSocketTimeoutException('Connection failed.') if self.api_key and self.api_secret: self._auth() if isinstance(self.subscriptions, str): self.subscriptions = [self.subscriptions] self.ws.send( json.dumps({ 'op': 'subscribe', 'args': self.subscriptions }) ) for topic in self.subscriptions: if topic not in self.data: self.data[topic] = {} @staticmethod def _find_index(source, target, key): return next(i for i, j in enumerate(source) if j[key] == target[key]) def _on_message(self, message): msg_json = json.loads(message) if 'success' in msg_json: if msg_json['success']: if 'request' in msg_json: if msg_json['request']['op'] == 'auth': self.logger.debug('Authorization successful.') self.auth = True if msg_json['request']['op'] == 'subscribe': sub = msg_json['request']['args'] self.logger.debug(f'Subscription to {sub} successful.') else: response = msg_json['ret_msg'] if 'unknown topic' in response: self.logger.error('Couldn\'t subscribe to topic.' f' Error: {response}.') # If we get unsuccesful auth, notify user. elif msg_json['request']['op'] == 'auth': self.logger.debug('Authorization failed. Please check your ' 'API keys and restart.') elif 'topic' in msg_json: topic = msg_json['topic'] # If incoming 'orderbookL2' data. if 'orderBook' in topic: # Make updates according to delta response. if 'delta' in msg_json['type']: # Delete. for entry in msg_json['data']['delete']: index = self._find_index(self.data[topic], entry, 'id') self.data[topic].pop(index) # Update. for entry in msg_json['data']['update']: index = self._find_index(self.data[topic], entry, 'id') self.data[topic][index] = entry # Insert. for entry in msg_json['data']['insert']: self.data[topic].append(entry) # Record the initial snapshot. elif 'snapshot' in msg_json['type']: self.data[topic] = msg_json['data'] # For incoming 'order' and 'stop_order' data. 
elif any(i in topic for i in ['order', 'stop_order']): # record incoming data for i in msg_json['data']: try: # update existing entries # temporary workaround for field anomaly in stop_order data ord_id = topic + '_id' if i['symbol'].endswith('USDT') else 'order_id' index = self._find_index(self.data[topic], i, ord_id) self.data[topic][index] = i except StopIteration: # Keep appending or create new list if not already created. try: self.data[topic].append(i) except AttributeError: self.data[topic] = msg_json['data'] # For incoming 'trade' and 'execution' data. elif any(i in topic for i in ['trade', 'execution']): # Keep appending or create new list if not already created. try: for i in msg_json['data']: self.data[topic].append(i) except AttributeError: self.data[topic] = msg_json['data'] # If list is too long, pop the first entry. if len(self.data[topic]) > self.max_length: self.data[topic].pop(0) # If incoming 'insurance', 'klineV2', or 'wallet' data. elif any(i in topic for i in ['insurance', 'klineV2', 'wallet', 'candle']): # Record incoming data. self.data[topic] = msg_json['data'][0] if self.trim else msg_json # If incoming 'instrument_info' data. elif 'instrument_info' in topic: # Make updates according to delta response. if 'delta' in msg_json['type']: for i in msg_json['data']['update'][0]: self.data[topic][i] = msg_json['data']['update'][0][i] # Record the initial snapshot. elif 'snapshot' in msg_json['type']: self.data[topic] = msg_json['data'] if self.trim else msg_json # If incoming 'position' data. elif 'position' in topic: # Record incoming position data. for p in msg_json['data']: # linear (USDT) positions have Buy|Sell side and # updates contain all USDT positions. # For linear tickers... if p['symbol'].endswith('USDT'): try: self.data[topic][p['symbol']][p['side']] = p # if side key hasn't been created yet... except KeyError: self.data[topic][p['symbol']] = {p['side']: p} else: self.data[topic][p['symbol']] = p def _on_error(self, error): if not self.exited: self.logger.error(f'WebSocket {self.wsName} encountered error: {error}.') self.exit() if self.handle_error: self._reset() self._connect(self.endpoint) def _on_open(self): self.logger.debug(f'WebSocket {self.wsName} opened.') def _on_close(self): self.logger.debug(f'WebSocket {self.wsName} closed.') def _reset(self): self.exited = False self.auth = False self.data = {}
true
true
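For readers skimming the record that ends above (an exchange HTTP/WebSocket client), a minimal driving sketch follows. It is not taken from the record: it assumes the WebSocket class shown there is importable as-is, and the endpoint URL is a placeholder, not the exchange's real realtime URL.

# Minimal sketch, not part of the record: assumes the WebSocket class above is importable
# and that the endpoint below is replaced with the exchange's documented realtime stream.
import time

ws = WebSocket(
    endpoint='wss://example.exchange/realtime',          # hypothetical placeholder URL
    subscriptions=['trade.BTCUSD', 'klineV2.5.BTCUSD'],  # topic formats required by __init__
)
time.sleep(2)                     # let a few messages arrive
print(ws.fetch('trade.BTCUSD'))   # returns the buffered trades (purged on fetch by default)
ws.exit()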
f72482536502a08c92fdd47d1959c93914af950f
1,330
py
Python
Competition Codes/packageFunction.py
Harrdy2018/2018-Huawei-Code-Craft
62fa76c658746550b2e5c8ef059a5c748e93c06c
[ "Apache-2.0" ]
1
2019-04-08T04:15:51.000Z
2019-04-08T04:15:51.000Z
Competition Codes/packageFunction.py
Harrdy2018/2018-Huawei-Code-Craft
62fa76c658746550b2e5c8ef059a5c748e93c06c
[ "Apache-2.0" ]
null
null
null
Competition Codes/packageFunction.py
Harrdy2018/2018-Huawei-Code-Craft
62fa76c658746550b2e5c8ef059a5c748e93c06c
[ "Apache-2.0" ]
2
2018-04-16T10:11:24.000Z
2019-06-28T06:30:28.000Z
#coding=utf-8
def MultiplePackage(N,C,weight,value,num,physic):
    '''
    Bounded knapsack problem (each item has a limit on how many times it can be taken)
    :param N: number of predicted VM types, e.g. N=pre_num
    :param C: if the input file optimizes CPU, then the knapsack capacity is MEM, e.g. C=
    :param weight: capacity of each item as an array, e.g. weight=[0,5,4,7,2,6]
    :param value: value of each item as an array, e.g. value=[0,12,3,10,3,6]
    :param num: limit on the count of each item, e.g. num=[0,2,4,1,5,3]
    :return: the total-value matrix
    '''
    # Initialize f[N+1][C+1] to 0; f[i][j] is the maximum value obtainable by packing
    # the first i items into a knapsack of capacity j
    f=[[0 for col in range(C+1)] for row in range(N+1)]
    for i in range(1,N+1):
        for j in range(1,C+1):
            # The most copies of item i that can be taken is the smaller of j/weight[i] and num[i]
            max_num_i=min(j/weight[i],num[i])
            # Start with k=0 as the best; the loop below assigns to f[i][j] the maximum
            # value obtainable by taking k copies of item i
            f[i][j]=f[i-1][j]
            for k in range(max_num_i+1):
                if f[i][j]<f[i-1][j-k*weight[i]]+k*value[i]<=physic:
                    # state transition equation
                    f[i][j]=f[i-1][j-k*weight[i]]+k*value[i]
    return f

def FindWhat(f,value,weight,i,j,item,num):
    if i>=0:
        if f[i][j]==f[i-1][j]:
            item[i]=0
            FindWhat(f,value,weight,i-1,j,item,num)
        elif j-weight[i]>=0:
            for k in range(num[i]+1):
                if f[i][j]==f[i-1][j-k*weight[i]]+k*value[i]:
                    item[i]=k
                    break
            FindWhat(f,value,weight,i-1,j-item[i]*weight[i],item,num)
35
69
0.52406
def MultiplePackage(N,C,weight,value,num,physic):
    f=[[0 for col in range(C+1)] for row in range(N+1)]
    for i in range(1,N+1):
        for j in range(1,C+1):
            max_num_i=min(j/weight[i],num[i])
            f[i][j]=f[i-1][j]
            for k in range(max_num_i+1):
                if f[i][j]<f[i-1][j-k*weight[i]]+k*value[i]<=physic:
                    f[i][j]=f[i-1][j-k*weight[i]]+k*value[i]
    return f

def FindWhat(f,value,weight,i,j,item,num):
    if i>=0:
        if f[i][j]==f[i-1][j]:
            item[i]=0
            FindWhat(f,value,weight,i-1,j,item,num)
        elif j-weight[i]>=0:
            for k in range(num[i]+1):
                if f[i][j]==f[i-1][j-k*weight[i]]+k*value[i]:
                    item[i]=k
                    break
            FindWhat(f,value,weight,i-1,j-item[i]*weight[i],item,num)
true
true
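A hypothetical driver for the two functions in the record above, built only from the docstring's example arrays; N, C and the large physic cap are assumed values. Note the DP relies on Python 2 integer division (j/weight[i]); under Python 3 that expression would need //.

# Illustrative only; weight/value/num come from the docstring example, physic is an
# assumed large cap so it never binds. Requires Python 2 division semantics in the DP.
N, C = 5, 10
weight = [0, 5, 4, 7, 2, 6]   # index 0 is an unused placeholder, as in the original
value  = [0, 12, 3, 10, 3, 6]
num    = [0, 2, 4, 1, 5, 3]
physic = 10 ** 9

f = MultiplePackage(N, C, weight, value, num, physic)
item = [0] * (N + 1)                    # item[i] = how many copies of item i to take
FindWhat(f, value, weight, N, C, item, num)
print(f[N][C], item[1:])                # best total value and per-item counts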
f72482abf2030cf6d190809b6914b9bdbeec552b
3,074
py
Python
users/migrations/0001_initial.py
SohailAQ/Rest_Alpha
326009217f16164c7f4667e7b3dbb82e43cf2469
[ "MIT" ]
null
null
null
users/migrations/0001_initial.py
SohailAQ/Rest_Alpha
326009217f16164c7f4667e7b3dbb82e43cf2469
[ "MIT" ]
null
null
null
users/migrations/0001_initial.py
SohailAQ/Rest_Alpha
326009217f16164c7f4667e7b3dbb82e43cf2469
[ "MIT" ]
null
null
null
# Generated by Django 3.2.8 on 2021-10-21 16:43

import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female'), ('NS', 'Not Specified')], max_length=20)),
                ('phone_number', models.CharField(max_length=30)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
65.404255
329
0.656474
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female'), ('NS', 'Not Specified')], max_length=20)),
                ('phone_number', models.CharField(max_length=30)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
true
true
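The migration above implies a models.py along these lines. This is an inference from the field list, not code taken from the repository.

# Hypothetical counterpart to the CustomUser migration above (inferred, for clarity).
from django.contrib.auth.models import AbstractUser
from django.db import models


class CustomUser(AbstractUser):
    GENDER_CHOICES = [('M', 'Male'), ('F', 'Female'), ('NS', 'Not Specified')]

    gender = models.CharField(max_length=20, choices=GENDER_CHOICES)
    phone_number = models.CharField(max_length=30)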
f72482c36144879a71a381f8ffacbf087f5e4594
28,655
py
Python
sdk/python/pulumi_kubernetes/core/v1/Event.py
polivbr/pulumi-kubernetes
36a5fb34240a38a60b52a5f4e55e66e248d9305f
[ "Apache-2.0" ]
277
2018-06-18T14:57:09.000Z
2022-03-29T04:05:06.000Z
sdk/python/pulumi_kubernetes/core/v1/Event.py
polivbr/pulumi-kubernetes
36a5fb34240a38a60b52a5f4e55e66e248d9305f
[ "Apache-2.0" ]
1,447
2018-06-20T00:58:34.000Z
2022-03-31T21:28:43.000Z
sdk/python/pulumi_kubernetes/core/v1/Event.py
polivbr/pulumi-kubernetes
36a5fb34240a38a60b52a5f4e55e66e248d9305f
[ "Apache-2.0" ]
95
2018-06-30T03:30:05.000Z
2022-03-29T04:05:09.000Z
# coding=utf-8 # *** WARNING: this file was generated by pulumigen. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ... import meta as _meta from ._inputs import * __all__ = ['EventInitArgs', 'Event'] @pulumi.input_type class EventInitArgs: def __init__(__self__, *, involved_object: pulumi.Input['ObjectReferenceArgs'], metadata: pulumi.Input['_meta.v1.ObjectMetaArgs'], action: Optional[pulumi.Input[str]] = None, api_version: Optional[pulumi.Input[str]] = None, count: Optional[pulumi.Input[int]] = None, event_time: Optional[pulumi.Input[str]] = None, first_timestamp: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[str]] = None, last_timestamp: Optional[pulumi.Input[str]] = None, message: Optional[pulumi.Input[str]] = None, reason: Optional[pulumi.Input[str]] = None, related: Optional[pulumi.Input['ObjectReferenceArgs']] = None, reporting_component: Optional[pulumi.Input[str]] = None, reporting_instance: Optional[pulumi.Input[str]] = None, series: Optional[pulumi.Input['EventSeriesArgs']] = None, source: Optional[pulumi.Input['EventSourceArgs']] = None, type: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a Event resource. :param pulumi.Input['ObjectReferenceArgs'] involved_object: The object that this event is about. :param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata :param pulumi.Input[str] action: What action was taken/failed regarding to the Regarding object. :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources :param pulumi.Input[int] count: The number of times this event has occurred. :param pulumi.Input[str] event_time: Time when this Event was first observed. :param pulumi.Input[str] first_timestamp: The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds :param pulumi.Input[str] last_timestamp: The time at which the most recent occurrence of this event was recorded. :param pulumi.Input[str] message: A human-readable description of the status of this operation. :param pulumi.Input[str] reason: This should be a short, machine understandable string that gives the reason for the transition into the object's current status. :param pulumi.Input['ObjectReferenceArgs'] related: Optional secondary object for more complex actions. :param pulumi.Input[str] reporting_component: Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. :param pulumi.Input[str] reporting_instance: ID of the controller instance, e.g. `kubelet-xyzf`. :param pulumi.Input['EventSeriesArgs'] series: Data about the Event series this event represents or nil if it's a singleton Event. 
:param pulumi.Input['EventSourceArgs'] source: The component reporting this event. Should be a short machine understandable string. :param pulumi.Input[str] type: Type of this event (Normal, Warning), new types could be added in the future """ pulumi.set(__self__, "involved_object", involved_object) pulumi.set(__self__, "metadata", metadata) if action is not None: pulumi.set(__self__, "action", action) if api_version is not None: pulumi.set(__self__, "api_version", 'v1') if count is not None: pulumi.set(__self__, "count", count) if event_time is not None: pulumi.set(__self__, "event_time", event_time) if first_timestamp is not None: pulumi.set(__self__, "first_timestamp", first_timestamp) if kind is not None: pulumi.set(__self__, "kind", 'Event') if last_timestamp is not None: pulumi.set(__self__, "last_timestamp", last_timestamp) if message is not None: pulumi.set(__self__, "message", message) if reason is not None: pulumi.set(__self__, "reason", reason) if related is not None: pulumi.set(__self__, "related", related) if reporting_component is not None: pulumi.set(__self__, "reporting_component", reporting_component) if reporting_instance is not None: pulumi.set(__self__, "reporting_instance", reporting_instance) if series is not None: pulumi.set(__self__, "series", series) if source is not None: pulumi.set(__self__, "source", source) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="involvedObject") def involved_object(self) -> pulumi.Input['ObjectReferenceArgs']: """ The object that this event is about. """ return pulumi.get(self, "involved_object") @involved_object.setter def involved_object(self, value: pulumi.Input['ObjectReferenceArgs']): pulumi.set(self, "involved_object", value) @property @pulumi.getter def metadata(self) -> pulumi.Input['_meta.v1.ObjectMetaArgs']: """ Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata """ return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: pulumi.Input['_meta.v1.ObjectMetaArgs']): pulumi.set(self, "metadata", value) @property @pulumi.getter def action(self) -> Optional[pulumi.Input[str]]: """ What action was taken/failed regarding to the Regarding object. """ return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "action", value) @property @pulumi.getter(name="apiVersion") def api_version(self) -> Optional[pulumi.Input[str]]: """ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources """ return pulumi.get(self, "api_version") @api_version.setter def api_version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "api_version", value) @property @pulumi.getter def count(self) -> Optional[pulumi.Input[int]]: """ The number of times this event has occurred. """ return pulumi.get(self, "count") @count.setter def count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "count", value) @property @pulumi.getter(name="eventTime") def event_time(self) -> Optional[pulumi.Input[str]]: """ Time when this Event was first observed. 
""" return pulumi.get(self, "event_time") @event_time.setter def event_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "event_time", value) @property @pulumi.getter(name="firstTimestamp") def first_timestamp(self) -> Optional[pulumi.Input[str]]: """ The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) """ return pulumi.get(self, "first_timestamp") @first_timestamp.setter def first_timestamp(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "first_timestamp", value) @property @pulumi.getter def kind(self) -> Optional[pulumi.Input[str]]: """ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds """ return pulumi.get(self, "kind") @kind.setter def kind(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "kind", value) @property @pulumi.getter(name="lastTimestamp") def last_timestamp(self) -> Optional[pulumi.Input[str]]: """ The time at which the most recent occurrence of this event was recorded. """ return pulumi.get(self, "last_timestamp") @last_timestamp.setter def last_timestamp(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "last_timestamp", value) @property @pulumi.getter def message(self) -> Optional[pulumi.Input[str]]: """ A human-readable description of the status of this operation. """ return pulumi.get(self, "message") @message.setter def message(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "message", value) @property @pulumi.getter def reason(self) -> Optional[pulumi.Input[str]]: """ This should be a short, machine understandable string that gives the reason for the transition into the object's current status. """ return pulumi.get(self, "reason") @reason.setter def reason(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "reason", value) @property @pulumi.getter def related(self) -> Optional[pulumi.Input['ObjectReferenceArgs']]: """ Optional secondary object for more complex actions. """ return pulumi.get(self, "related") @related.setter def related(self, value: Optional[pulumi.Input['ObjectReferenceArgs']]): pulumi.set(self, "related", value) @property @pulumi.getter(name="reportingComponent") def reporting_component(self) -> Optional[pulumi.Input[str]]: """ Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. """ return pulumi.get(self, "reporting_component") @reporting_component.setter def reporting_component(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "reporting_component", value) @property @pulumi.getter(name="reportingInstance") def reporting_instance(self) -> Optional[pulumi.Input[str]]: """ ID of the controller instance, e.g. `kubelet-xyzf`. """ return pulumi.get(self, "reporting_instance") @reporting_instance.setter def reporting_instance(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "reporting_instance", value) @property @pulumi.getter def series(self) -> Optional[pulumi.Input['EventSeriesArgs']]: """ Data about the Event series this event represents or nil if it's a singleton Event. """ return pulumi.get(self, "series") @series.setter def series(self, value: Optional[pulumi.Input['EventSeriesArgs']]): pulumi.set(self, "series", value) @property @pulumi.getter def source(self) -> Optional[pulumi.Input['EventSourceArgs']]: """ The component reporting this event. 
Should be a short machine understandable string. """ return pulumi.get(self, "source") @source.setter def source(self, value: Optional[pulumi.Input['EventSourceArgs']]): pulumi.set(self, "source", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: """ Type of this event (Normal, Warning), new types could be added in the future """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) class Event(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, action: Optional[pulumi.Input[str]] = None, api_version: Optional[pulumi.Input[str]] = None, count: Optional[pulumi.Input[int]] = None, event_time: Optional[pulumi.Input[str]] = None, first_timestamp: Optional[pulumi.Input[str]] = None, involved_object: Optional[pulumi.Input[pulumi.InputType['ObjectReferenceArgs']]] = None, kind: Optional[pulumi.Input[str]] = None, last_timestamp: Optional[pulumi.Input[str]] = None, message: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None, reason: Optional[pulumi.Input[str]] = None, related: Optional[pulumi.Input[pulumi.InputType['ObjectReferenceArgs']]] = None, reporting_component: Optional[pulumi.Input[str]] = None, reporting_instance: Optional[pulumi.Input[str]] = None, series: Optional[pulumi.Input[pulumi.InputType['EventSeriesArgs']]] = None, source: Optional[pulumi.Input[pulumi.InputType['EventSourceArgs']]] = None, type: Optional[pulumi.Input[str]] = None, __props__=None): """ Event is a report of an event somewhere in the cluster. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] action: What action was taken/failed regarding to the Regarding object. :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources :param pulumi.Input[int] count: The number of times this event has occurred. :param pulumi.Input[str] event_time: Time when this Event was first observed. :param pulumi.Input[str] first_timestamp: The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) :param pulumi.Input[pulumi.InputType['ObjectReferenceArgs']] involved_object: The object that this event is about. :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds :param pulumi.Input[str] last_timestamp: The time at which the most recent occurrence of this event was recorded. :param pulumi.Input[str] message: A human-readable description of the status of this operation. 
:param pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata :param pulumi.Input[str] reason: This should be a short, machine understandable string that gives the reason for the transition into the object's current status. :param pulumi.Input[pulumi.InputType['ObjectReferenceArgs']] related: Optional secondary object for more complex actions. :param pulumi.Input[str] reporting_component: Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. :param pulumi.Input[str] reporting_instance: ID of the controller instance, e.g. `kubelet-xyzf`. :param pulumi.Input[pulumi.InputType['EventSeriesArgs']] series: Data about the Event series this event represents or nil if it's a singleton Event. :param pulumi.Input[pulumi.InputType['EventSourceArgs']] source: The component reporting this event. Should be a short machine understandable string. :param pulumi.Input[str] type: Type of this event (Normal, Warning), new types could be added in the future """ ... @overload def __init__(__self__, resource_name: str, args: EventInitArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Event is a report of an event somewhere in the cluster. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data. :param str resource_name: The name of the resource. :param EventInitArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(EventInitArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, action: Optional[pulumi.Input[str]] = None, api_version: Optional[pulumi.Input[str]] = None, count: Optional[pulumi.Input[int]] = None, event_time: Optional[pulumi.Input[str]] = None, first_timestamp: Optional[pulumi.Input[str]] = None, involved_object: Optional[pulumi.Input[pulumi.InputType['ObjectReferenceArgs']]] = None, kind: Optional[pulumi.Input[str]] = None, last_timestamp: Optional[pulumi.Input[str]] = None, message: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None, reason: Optional[pulumi.Input[str]] = None, related: Optional[pulumi.Input[pulumi.InputType['ObjectReferenceArgs']]] = None, reporting_component: Optional[pulumi.Input[str]] = None, reporting_instance: Optional[pulumi.Input[str]] = None, series: Optional[pulumi.Input[pulumi.InputType['EventSeriesArgs']]] = None, source: Optional[pulumi.Input[pulumi.InputType['EventSourceArgs']]] = None, type: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = EventInitArgs.__new__(EventInitArgs) __props__.__dict__["action"] = action __props__.__dict__["api_version"] = 'v1' __props__.__dict__["count"] = count __props__.__dict__["event_time"] = event_time __props__.__dict__["first_timestamp"] = first_timestamp if involved_object is None and not opts.urn: raise TypeError("Missing required property 'involved_object'") __props__.__dict__["involved_object"] = involved_object __props__.__dict__["kind"] = 'Event' __props__.__dict__["last_timestamp"] = last_timestamp __props__.__dict__["message"] = message if metadata is None and not opts.urn: raise TypeError("Missing required property 'metadata'") __props__.__dict__["metadata"] = metadata __props__.__dict__["reason"] = reason __props__.__dict__["related"] = related __props__.__dict__["reporting_component"] = reporting_component __props__.__dict__["reporting_instance"] = reporting_instance __props__.__dict__["series"] = series __props__.__dict__["source"] = source __props__.__dict__["type"] = type alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:events.k8s.io/v1:Event"), pulumi.Alias(type_="kubernetes:events.k8s.io/v1beta1:Event")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(Event, __self__).__init__( 'kubernetes:core/v1:Event', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'Event': """ Get an existing Event resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. 
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = EventInitArgs.__new__(EventInitArgs) __props__.__dict__["action"] = None __props__.__dict__["api_version"] = None __props__.__dict__["count"] = None __props__.__dict__["event_time"] = None __props__.__dict__["first_timestamp"] = None __props__.__dict__["involved_object"] = None __props__.__dict__["kind"] = None __props__.__dict__["last_timestamp"] = None __props__.__dict__["message"] = None __props__.__dict__["metadata"] = None __props__.__dict__["reason"] = None __props__.__dict__["related"] = None __props__.__dict__["reporting_component"] = None __props__.__dict__["reporting_instance"] = None __props__.__dict__["series"] = None __props__.__dict__["source"] = None __props__.__dict__["type"] = None return Event(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def action(self) -> pulumi.Output[Optional[str]]: """ What action was taken/failed regarding to the Regarding object. """ return pulumi.get(self, "action") @property @pulumi.getter(name="apiVersion") def api_version(self) -> pulumi.Output[Optional[str]]: """ APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources """ return pulumi.get(self, "api_version") @property @pulumi.getter def count(self) -> pulumi.Output[Optional[int]]: """ The number of times this event has occurred. """ return pulumi.get(self, "count") @property @pulumi.getter(name="eventTime") def event_time(self) -> pulumi.Output[Optional[str]]: """ Time when this Event was first observed. """ return pulumi.get(self, "event_time") @property @pulumi.getter(name="firstTimestamp") def first_timestamp(self) -> pulumi.Output[Optional[str]]: """ The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) """ return pulumi.get(self, "first_timestamp") @property @pulumi.getter(name="involvedObject") def involved_object(self) -> pulumi.Output['outputs.ObjectReference']: """ The object that this event is about. """ return pulumi.get(self, "involved_object") @property @pulumi.getter def kind(self) -> pulumi.Output[Optional[str]]: """ Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds """ return pulumi.get(self, "kind") @property @pulumi.getter(name="lastTimestamp") def last_timestamp(self) -> pulumi.Output[Optional[str]]: """ The time at which the most recent occurrence of this event was recorded. """ return pulumi.get(self, "last_timestamp") @property @pulumi.getter def message(self) -> pulumi.Output[Optional[str]]: """ A human-readable description of the status of this operation. """ return pulumi.get(self, "message") @property @pulumi.getter def metadata(self) -> pulumi.Output['_meta.v1.outputs.ObjectMeta']: """ Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata """ return pulumi.get(self, "metadata") @property @pulumi.getter def reason(self) -> pulumi.Output[Optional[str]]: """ This should be a short, machine understandable string that gives the reason for the transition into the object's current status. """ return pulumi.get(self, "reason") @property @pulumi.getter def related(self) -> pulumi.Output[Optional['outputs.ObjectReference']]: """ Optional secondary object for more complex actions. """ return pulumi.get(self, "related") @property @pulumi.getter(name="reportingComponent") def reporting_component(self) -> pulumi.Output[Optional[str]]: """ Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. """ return pulumi.get(self, "reporting_component") @property @pulumi.getter(name="reportingInstance") def reporting_instance(self) -> pulumi.Output[Optional[str]]: """ ID of the controller instance, e.g. `kubelet-xyzf`. """ return pulumi.get(self, "reporting_instance") @property @pulumi.getter def series(self) -> pulumi.Output[Optional['outputs.EventSeries']]: """ Data about the Event series this event represents or nil if it's a singleton Event. """ return pulumi.get(self, "series") @property @pulumi.getter def source(self) -> pulumi.Output[Optional['outputs.EventSource']]: """ The component reporting this event. Should be a short machine understandable string. """ return pulumi.get(self, "source") @property @pulumi.getter def type(self) -> pulumi.Output[Optional[str]]: """ Type of this event (Normal, Warning), new types could be added in the future """ return pulumi.get(self, "type")
48.322091
401
0.658873
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ... import meta as _meta from ._inputs import * __all__ = ['EventInitArgs', 'Event'] @pulumi.input_type class EventInitArgs: def __init__(__self__, *, involved_object: pulumi.Input['ObjectReferenceArgs'], metadata: pulumi.Input['_meta.v1.ObjectMetaArgs'], action: Optional[pulumi.Input[str]] = None, api_version: Optional[pulumi.Input[str]] = None, count: Optional[pulumi.Input[int]] = None, event_time: Optional[pulumi.Input[str]] = None, first_timestamp: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[str]] = None, last_timestamp: Optional[pulumi.Input[str]] = None, message: Optional[pulumi.Input[str]] = None, reason: Optional[pulumi.Input[str]] = None, related: Optional[pulumi.Input['ObjectReferenceArgs']] = None, reporting_component: Optional[pulumi.Input[str]] = None, reporting_instance: Optional[pulumi.Input[str]] = None, series: Optional[pulumi.Input['EventSeriesArgs']] = None, source: Optional[pulumi.Input['EventSourceArgs']] = None, type: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "involved_object", involved_object) pulumi.set(__self__, "metadata", metadata) if action is not None: pulumi.set(__self__, "action", action) if api_version is not None: pulumi.set(__self__, "api_version", 'v1') if count is not None: pulumi.set(__self__, "count", count) if event_time is not None: pulumi.set(__self__, "event_time", event_time) if first_timestamp is not None: pulumi.set(__self__, "first_timestamp", first_timestamp) if kind is not None: pulumi.set(__self__, "kind", 'Event') if last_timestamp is not None: pulumi.set(__self__, "last_timestamp", last_timestamp) if message is not None: pulumi.set(__self__, "message", message) if reason is not None: pulumi.set(__self__, "reason", reason) if related is not None: pulumi.set(__self__, "related", related) if reporting_component is not None: pulumi.set(__self__, "reporting_component", reporting_component) if reporting_instance is not None: pulumi.set(__self__, "reporting_instance", reporting_instance) if series is not None: pulumi.set(__self__, "series", series) if source is not None: pulumi.set(__self__, "source", source) if type is not None: pulumi.set(__self__, "type", type) @property @pulumi.getter(name="involvedObject") def involved_object(self) -> pulumi.Input['ObjectReferenceArgs']: return pulumi.get(self, "involved_object") @involved_object.setter def involved_object(self, value: pulumi.Input['ObjectReferenceArgs']): pulumi.set(self, "involved_object", value) @property @pulumi.getter def metadata(self) -> pulumi.Input['_meta.v1.ObjectMetaArgs']: return pulumi.get(self, "metadata") @metadata.setter def metadata(self, value: pulumi.Input['_meta.v1.ObjectMetaArgs']): pulumi.set(self, "metadata", value) @property @pulumi.getter def action(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "action") @action.setter def action(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "action", value) @property @pulumi.getter(name="apiVersion") def api_version(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "api_version") @api_version.setter def api_version(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "api_version", value) @property @pulumi.getter def count(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "count") @count.setter def count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, 
"count", value) @property @pulumi.getter(name="eventTime") def event_time(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "event_time") @event_time.setter def event_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "event_time", value) @property @pulumi.getter(name="firstTimestamp") def first_timestamp(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "first_timestamp") @first_timestamp.setter def first_timestamp(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "first_timestamp", value) @property @pulumi.getter def kind(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "kind") @kind.setter def kind(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "kind", value) @property @pulumi.getter(name="lastTimestamp") def last_timestamp(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "last_timestamp") @last_timestamp.setter def last_timestamp(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "last_timestamp", value) @property @pulumi.getter def message(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "message") @message.setter def message(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "message", value) @property @pulumi.getter def reason(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "reason") @reason.setter def reason(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "reason", value) @property @pulumi.getter def related(self) -> Optional[pulumi.Input['ObjectReferenceArgs']]: return pulumi.get(self, "related") @related.setter def related(self, value: Optional[pulumi.Input['ObjectReferenceArgs']]): pulumi.set(self, "related", value) @property @pulumi.getter(name="reportingComponent") def reporting_component(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "reporting_component") @reporting_component.setter def reporting_component(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "reporting_component", value) @property @pulumi.getter(name="reportingInstance") def reporting_instance(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "reporting_instance") @reporting_instance.setter def reporting_instance(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "reporting_instance", value) @property @pulumi.getter def series(self) -> Optional[pulumi.Input['EventSeriesArgs']]: return pulumi.get(self, "series") @series.setter def series(self, value: Optional[pulumi.Input['EventSeriesArgs']]): pulumi.set(self, "series", value) @property @pulumi.getter def source(self) -> Optional[pulumi.Input['EventSourceArgs']]: return pulumi.get(self, "source") @source.setter def source(self, value: Optional[pulumi.Input['EventSourceArgs']]): pulumi.set(self, "source", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) class Event(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, action: Optional[pulumi.Input[str]] = None, api_version: Optional[pulumi.Input[str]] = None, count: Optional[pulumi.Input[int]] = None, event_time: Optional[pulumi.Input[str]] = None, first_timestamp: Optional[pulumi.Input[str]] = None, involved_object: Optional[pulumi.Input[pulumi.InputType['ObjectReferenceArgs']]] = None, kind: Optional[pulumi.Input[str]] = None, last_timestamp: Optional[pulumi.Input[str]] = None, message: 
Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None, reason: Optional[pulumi.Input[str]] = None, related: Optional[pulumi.Input[pulumi.InputType['ObjectReferenceArgs']]] = None, reporting_component: Optional[pulumi.Input[str]] = None, reporting_instance: Optional[pulumi.Input[str]] = None, series: Optional[pulumi.Input[pulumi.InputType['EventSeriesArgs']]] = None, source: Optional[pulumi.Input[pulumi.InputType['EventSourceArgs']]] = None, type: Optional[pulumi.Input[str]] = None, __props__=None): ... @overload def __init__(__self__, resource_name: str, args: EventInitArgs, opts: Optional[pulumi.ResourceOptions] = None): ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(EventInitArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, action: Optional[pulumi.Input[str]] = None, api_version: Optional[pulumi.Input[str]] = None, count: Optional[pulumi.Input[int]] = None, event_time: Optional[pulumi.Input[str]] = None, first_timestamp: Optional[pulumi.Input[str]] = None, involved_object: Optional[pulumi.Input[pulumi.InputType['ObjectReferenceArgs']]] = None, kind: Optional[pulumi.Input[str]] = None, last_timestamp: Optional[pulumi.Input[str]] = None, message: Optional[pulumi.Input[str]] = None, metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None, reason: Optional[pulumi.Input[str]] = None, related: Optional[pulumi.Input[pulumi.InputType['ObjectReferenceArgs']]] = None, reporting_component: Optional[pulumi.Input[str]] = None, reporting_instance: Optional[pulumi.Input[str]] = None, series: Optional[pulumi.Input[pulumi.InputType['EventSeriesArgs']]] = None, source: Optional[pulumi.Input[pulumi.InputType['EventSourceArgs']]] = None, type: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = EventInitArgs.__new__(EventInitArgs) __props__.__dict__["action"] = action __props__.__dict__["api_version"] = 'v1' __props__.__dict__["count"] = count __props__.__dict__["event_time"] = event_time __props__.__dict__["first_timestamp"] = first_timestamp if involved_object is None and not opts.urn: raise TypeError("Missing required property 'involved_object'") __props__.__dict__["involved_object"] = involved_object __props__.__dict__["kind"] = 'Event' __props__.__dict__["last_timestamp"] = last_timestamp __props__.__dict__["message"] = message if metadata is None and not opts.urn: raise TypeError("Missing required property 'metadata'") __props__.__dict__["metadata"] = metadata __props__.__dict__["reason"] = reason __props__.__dict__["related"] = related __props__.__dict__["reporting_component"] = reporting_component __props__.__dict__["reporting_instance"] = reporting_instance __props__.__dict__["series"] = series __props__.__dict__["source"] = source __props__.__dict__["type"] = type 
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:events.k8s.io/v1:Event"), pulumi.Alias(type_="kubernetes:events.k8s.io/v1beta1:Event")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(Event, __self__).__init__( 'kubernetes:core/v1:Event', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'Event': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = EventInitArgs.__new__(EventInitArgs) __props__.__dict__["action"] = None __props__.__dict__["api_version"] = None __props__.__dict__["count"] = None __props__.__dict__["event_time"] = None __props__.__dict__["first_timestamp"] = None __props__.__dict__["involved_object"] = None __props__.__dict__["kind"] = None __props__.__dict__["last_timestamp"] = None __props__.__dict__["message"] = None __props__.__dict__["metadata"] = None __props__.__dict__["reason"] = None __props__.__dict__["related"] = None __props__.__dict__["reporting_component"] = None __props__.__dict__["reporting_instance"] = None __props__.__dict__["series"] = None __props__.__dict__["source"] = None __props__.__dict__["type"] = None return Event(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def action(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "action") @property @pulumi.getter(name="apiVersion") def api_version(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "api_version") @property @pulumi.getter def count(self) -> pulumi.Output[Optional[int]]: return pulumi.get(self, "count") @property @pulumi.getter(name="eventTime") def event_time(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "event_time") @property @pulumi.getter(name="firstTimestamp") def first_timestamp(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "first_timestamp") @property @pulumi.getter(name="involvedObject") def involved_object(self) -> pulumi.Output['outputs.ObjectReference']: return pulumi.get(self, "involved_object") @property @pulumi.getter def kind(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "kind") @property @pulumi.getter(name="lastTimestamp") def last_timestamp(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "last_timestamp") @property @pulumi.getter def message(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "message") @property @pulumi.getter def metadata(self) -> pulumi.Output['_meta.v1.outputs.ObjectMeta']: return pulumi.get(self, "metadata") @property @pulumi.getter def reason(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "reason") @property @pulumi.getter def related(self) -> pulumi.Output[Optional['outputs.ObjectReference']]: return pulumi.get(self, "related") @property @pulumi.getter(name="reportingComponent") def reporting_component(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "reporting_component") @property @pulumi.getter(name="reportingInstance") def reporting_instance(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "reporting_instance") @property @pulumi.getter def series(self) -> pulumi.Output[Optional['outputs.EventSeries']]: return pulumi.get(self, "series") @property @pulumi.getter def source(self) -> pulumi.Output[Optional['outputs.EventSource']]: return pulumi.get(self, "source") @property @pulumi.getter def type(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "type")
true
true
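A short sketch of constructing the generated Event resource above from a Pulumi program. Only metadata and involved_object are required per the args class; all values are placeholders, and the sketch assumes the usual pulumi_kubernetes module layout for the args types.

# Illustrative usage of the generated core/v1 Event resource (values are placeholders).
import pulumi_kubernetes as k8s

event = k8s.core.v1.Event(
    "example-event",
    metadata=k8s.meta.v1.ObjectMetaArgs(name="example-event", namespace="default"),
    involved_object=k8s.core.v1.ObjectReferenceArgs(kind="Pod", name="my-pod", namespace="default"),
    reason="Scheduled",
    message="Illustrative event emitted from a Pulumi program",
    type="Normal",
)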
f724835605b65f1f1e1ae6d86dd97f931443263b
2,525
py
Python
stats.py
JoKalliauer/resvg-test-suite
c1a0e510bcdb89275b30caeb3725208304a26754
[ "MIT" ]
1
2021-06-02T11:21:41.000Z
2021-06-02T11:21:41.000Z
stats.py
adarshkrtiwari/resvg-test-suite
c2cb84b630332ea585fc54ba62e62d35fb4a33dd
[ "MIT" ]
null
null
null
stats.py
adarshkrtiwari/resvg-test-suite
c2cb84b630332ea585fc54ba62e62d35fb4a33dd
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

# Usage:
#   ./stats.py results.csv chart.svg
#   ./stats.py official.csv official_chart.svg

import argparse
import csv
import json
import subprocess

UNKNOWN = 0
PASSED = 1
FAILED = 2
CRASHED = 3
PARTIAL = 4
OUT_OF_SCOPE = 5


class RowData:
    def __init__(self, name, flags):
        self.name = name
        self.flags = flags


parser = argparse.ArgumentParser()
parser.add_argument('input', help='CSV file')
parser.add_argument('output', help='SVG file')
args = parser.parse_args()

rows = []
with open(args.input, 'r') as f:
    for row in csv.reader(f):
        if row[0] == 'title':
            continue

        file_name = row[0]
        flags = [int(row[1]), int(row[2]), int(row[3]), int(row[4]), int(row[5]),
                 int(row[6]), int(row[7]), int(row[8]), int(row[9])]
        rows.append(RowData(file_name, flags))

passed = [0, 0, 0, 0, 0, 0, 0, 0, 0]
for row in rows:
    for idx, flag in enumerate(row.flags):
        if flag == PASSED or flag == UNKNOWN:
            passed[idx] = passed[idx] + 1

barh_data = json.dumps(
    {
        "items_font": {
            "family": "Arial",
            "size": 12
        },
        "items": [
            {"name": "resvg 0.14.1", "value": passed[2]},
            {"name": "Chromium r856583", "value": passed[0]},
            {"name": "Firefox 87", "value": passed[1]},
            {"name": "Inkscape 1.0.2", "value": passed[4]},
            {"name": "librsvg 2.51.1", "value": passed[5]},
            {"name": "Batik 1.14", "value": passed[3]},
            {"name": "SVG.NET 3.2.3", "value": passed[6]},
            {"name": "QtSvg 5.15.2", "value": passed[8]},
            {"name": "wxSvg 1.5.11", "value": passed[7]}
        ],
        "hor_axis": {
            "title": "Tests passed",
            "round_tick_values": True,
            "width": 700,
            "max_value": len(rows)
        }
    },
    indent=4)

with open('chart.json', 'w') as f:
    f.write(barh_data)

try:
    subprocess.check_call(['./barh', 'chart.json', 'site/images/' + args.output])
except FileNotFoundError:
    print('Error: \'barh\' executable is not found.\n'
          'You should build https://github.com/RazrFalcon/barh '
          'and link resulting binary to the current directory.')
22.747748
81
0.486337
import argparse
import csv
import json
import subprocess

UNKNOWN = 0
PASSED = 1
FAILED = 2
CRASHED = 3
PARTIAL = 4
OUT_OF_SCOPE = 5


class RowData:
    def __init__(self, name, flags):
        self.name = name
        self.flags = flags


parser = argparse.ArgumentParser()
parser.add_argument('input', help='CSV file')
parser.add_argument('output', help='SVG file')
args = parser.parse_args()

rows = []
with open(args.input, 'r') as f:
    for row in csv.reader(f):
        if row[0] == 'title':
            continue

        file_name = row[0]
        flags = [int(row[1]), int(row[2]), int(row[3]), int(row[4]), int(row[5]),
                 int(row[6]), int(row[7]), int(row[8]), int(row[9])]
        rows.append(RowData(file_name, flags))

passed = [0, 0, 0, 0, 0, 0, 0, 0, 0]
for row in rows:
    for idx, flag in enumerate(row.flags):
        if flag == PASSED or flag == UNKNOWN:
            passed[idx] = passed[idx] + 1

barh_data = json.dumps(
    {
        "items_font": {
            "family": "Arial",
            "size": 12
        },
        "items": [
            {"name": "resvg 0.14.1", "value": passed[2]},
            {"name": "Chromium r856583", "value": passed[0]},
            {"name": "Firefox 87", "value": passed[1]},
            {"name": "Inkscape 1.0.2", "value": passed[4]},
            {"name": "librsvg 2.51.1", "value": passed[5]},
            {"name": "Batik 1.14", "value": passed[3]},
            {"name": "SVG.NET 3.2.3", "value": passed[6]},
            {"name": "QtSvg 5.15.2", "value": passed[8]},
            {"name": "wxSvg 1.5.11", "value": passed[7]}
        ],
        "hor_axis": {
            "title": "Tests passed",
            "round_tick_values": True,
            "width": 700,
            "max_value": len(rows)
        }
    },
    indent=4)

with open('chart.json', 'w') as f:
    f.write(barh_data)

try:
    subprocess.check_call(['./barh', 'chart.json', 'site/images/' + args.output])
except FileNotFoundError:
    print('Error: \'barh\' executable is not found.\n'
          'You should build https://github.com/RazrFalcon/barh '
          'and link resulting binary to the current directory.')
true
true
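The script in the record above expects a CSV whose first column is a test name (with a literal 'title' header cell) followed by nine integer flags per row, one per renderer, in the column order Chromium, Firefox, resvg, Batik, Inkscape, librsvg, SVG.NET, wxSvg, QtSvg. A hypothetical two-row input is sketched here; the header names other than 'title' are arbitrary and only illustrate the order.

# Hypothetical results.csv accepted by the script above; only the literal 'title'
# in the first header cell matters to the parser, other header names are placeholders.
sample = """title,chromium,firefox,resvg,batik,inkscape,librsvg,svgnet,wxsvg,qtsvg
e-rect-001.svg,1,1,1,1,1,1,1,1,1
e-rect-002.svg,1,2,1,0,1,1,4,3,2
"""
with open('results.csv', 'w') as f:
    f.write(sample)
# Then: ./stats.py results.csv chart.svg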
f724837677091a92b829fe1dee99ea0c985b84da
4,804
py
Python
docs/conf.py
cartovarc/mac-to-ip
94098096297fe3d64022ecc850923d3cdc9691b1
[ "MIT" ]
null
null
null
docs/conf.py
cartovarc/mac-to-ip
94098096297fe3d64022ecc850923d3cdc9691b1
[ "MIT" ]
null
null
null
docs/conf.py
cartovarc/mac-to-ip
94098096297fe3d64022ecc850923d3cdc9691b1
[ "MIT" ]
null
null
null
#!/usr/bin/env python
#
# mac_to_ip documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))

import mac_to_ip

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'MAC to IP'
copyright = "2021, Carlos Tovar"
author = "Carlos Tovar"

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = mac_to_ip.__version__
# The full version, including alpha/beta/rc tags.
release = mac_to_ip.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# -- Options for HTMLHelp output ---------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'mac_to_ipdoc'

# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'mac_to_ip.tex',
     'MAC to IP Documentation',
     'Carlos Tovar', 'manual'),
]

# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'mac_to_ip',
     'MAC to IP Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'mac_to_ip',
     'MAC to IP Documentation',
     author,
     'mac_to_ip',
     'One line description of project.',
     'Miscellaneous'),
]
29.472393
77
0.684013
import os
import sys
sys.path.insert(0, os.path.abspath('..'))

import mac_to_ip


extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

templates_path = ['_templates']

source_suffix = '.rst'

master_doc = 'index'

project = 'MAC to IP'
copyright = "2021, Carlos Tovar"
author = "Carlos Tovar"

# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = mac_to_ip.__version__
# The full version, including alpha/beta/rc tags.
release = mac_to_ip.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False


# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a
# theme further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']


# -- Options for HTMLHelp output ---------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'mac_to_ipdoc'


# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'mac_to_ip.tex',
     'MAC to IP Documentation',
     'Carlos Tovar', 'manual'),
]


# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'mac_to_ip',
     'MAC to IP Documentation',
     [author], 1)
]


# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'mac_to_ip',
     'MAC to IP Documentation',
     author,
     'mac_to_ip',
     'One line description of project.',
     'Miscellaneous'),
]
true
true
f72483c20810353d6391f7a8ef332c319e49ecf0
2,956
py
Python
pygyver/etl/ometria.py
madedotcom/pygyver
77da52570951c4ddaba3d60f36a82c64828a9121
[ "MIT" ]
1
2020-11-25T11:33:11.000Z
2020-11-25T11:33:11.000Z
pygyver/etl/ometria.py
madedotcom/pygyver
77da52570951c4ddaba3d60f36a82c64828a9121
[ "MIT" ]
21
2020-04-17T17:21:05.000Z
2021-05-21T13:34:51.000Z
pygyver/etl/ometria.py
madedotcom/pygyver
77da52570951c4ddaba3d60f36a82c64828a9121
[ "MIT" ]
2
2021-03-05T14:13:00.000Z
2021-12-30T18:36:40.000Z
""" Ometria API http://docs.ometria.com/apis/data_api_v2/ - the env vars for the authentication are stored on the kubernetes cluster under 'ometria-access-credentials' - functionality: set api credentials send custom events """ import logging import os import requests class OmetriaExecutor: """ Ometria API handler. Args: env: switch between environments in Ometria, 'staging' - for testing 'prod_marketing' - for marketing emails 'prod_service' - for service emails Attributes: api_endpoint: the base API endpoint api_key: required for authentication api_headers: header to be included in the request payload: the formatted payload to be sent response: the response from the API call Returns: OmetriaExecutor object """ def __init__(self, env: str): """ Initiate and collect API credentials. """ self.env = env self.api_endpoint = "https://api.ometria.com/v2" self.api_key = None self.api_headers = None self.payload = None self.response = None self.set_api_credentials() def set_api_credentials(self): """ Collect API credentials depending on the environment. """ # api key if self.env == "staging": api_key_env_var = "OMETRIA_STAGING_API_KEY" elif self.env == "prod_marketing": api_key_env_var = "OMETRIA_MARKETING_API_KEY" elif self.env == "prod_service": api_key_env_var = "OMETRIA_SERVICE_API_KEY" else: raise KeyError(f"Unknown env - {self.env}") if api_key_env_var in os.environ: self.api_key = os.getenv(api_key_env_var) logging.info("API credentials set") else: raise KeyError(f"Env var {api_key_env_var} does not exist") # headers self.api_headers = { "X-Ometria-Auth": self.api_key, "Content-Type": "application/json" } def send_custom_events(self): """ Send custom_event type of payload to Ometria, save the API response. """ if self.payload: # check if payload length is valid - 100 items per send payload_len = len(self.payload) if payload_len <= 100: # request - not adding retry for POST request self.response = requests.post( json=self.payload, url=f"{self.api_endpoint}/push", headers=self.api_headers ) logging.info(f"Sent {payload_len} 'custom_events' items") self.payload = None else: raise ValueError( f"Payload too big - {payload_len}, max 100 items" ) else: logging.info("No send - empty payload")
30.163265
76
0.579838
import logging
import os

import requests


class OmetriaExecutor:
    def __init__(self, env: str):
        self.env = env
        self.api_endpoint = "https://api.ometria.com/v2"
        self.api_key = None
        self.api_headers = None
        self.payload = None
        self.response = None
        self.set_api_credentials()

    def set_api_credentials(self):
        if self.env == "staging":
            api_key_env_var = "OMETRIA_STAGING_API_KEY"
        elif self.env == "prod_marketing":
            api_key_env_var = "OMETRIA_MARKETING_API_KEY"
        elif self.env == "prod_service":
            api_key_env_var = "OMETRIA_SERVICE_API_KEY"
        else:
            raise KeyError(f"Unknown env - {self.env}")
        if api_key_env_var in os.environ:
            self.api_key = os.getenv(api_key_env_var)
            logging.info("API credentials set")
        else:
            raise KeyError(f"Env var {api_key_env_var} does not exist")
        self.api_headers = {
            "X-Ometria-Auth": self.api_key,
            "Content-Type": "application/json"
        }

    def send_custom_events(self):
        if self.payload:
            payload_len = len(self.payload)
            if payload_len <= 100:
                self.response = requests.post(
                    json=self.payload,
                    url=f"{self.api_endpoint}/push",
                    headers=self.api_headers
                )
                logging.info(f"Sent {payload_len} 'custom_events' items")
                self.payload = None
            else:
                raise ValueError(
                    f"Payload too big - {payload_len}, max 100 items"
                )
        else:
            logging.info("No send - empty payload")
true
true
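A hedged usage sketch of the OmetriaExecutor class stored in the ometria.py record above (an illustration, not part of the dataset row): the import path is inferred from the record's repo path, and the API key value and event fields are placeholder assumptions, while the env names, the 100-item cap and the /push endpoint come from the stored code.

import os

from pygyver.etl.ometria import OmetriaExecutor  # import path inferred from the record's repo path

# set_api_credentials() raises KeyError unless the matching env var exists
os.environ.setdefault("OMETRIA_STAGING_API_KEY", "placeholder-key")

executor = OmetriaExecutor(env="staging")  # or "prod_marketing" / "prod_service"
executor.payload = [
    # illustrative item only; the real event schema is defined by the Ometria data API
    {"type": "custom_event", "profile_id": "123", "event_type": "example_event"},
]
executor.send_custom_events()        # POSTs up to 100 items to https://api.ometria.com/v2/push
print(executor.response.status_code)  # the raw requests.Response is kept on the instance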
f724849d81b90b9dba25cf9ce12481c3d66b2f7c
38,765
py
Python
airflow/models/baseoperator.py
subrays/airflow
3c8c0b3b6411762a4e4977e519374d9fb16b541d
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
null
null
null
airflow/models/baseoperator.py
subrays/airflow
3c8c0b3b6411762a4e4977e519374d9fb16b541d
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
null
null
null
airflow/models/baseoperator.py
subrays/airflow
3c8c0b3b6411762a4e4977e519374d9fb16b541d
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from abc import ABCMeta, abstractmethod from cached_property import cached_property import copy import functools import logging import sys import warnings from datetime import timedelta, datetime from typing import Callable, Dict, Iterable, List, Optional, Set import jinja2 import six from airflow import configuration, settings from airflow.exceptions import AirflowException from airflow.lineage import prepare_lineage, apply_lineage, DataSet from airflow.models.dag import DAG from airflow.models.taskinstance import TaskInstance, clear_task_instances from airflow.models.xcom import XCOM_RETURN_KEY from airflow.ti_deps.deps.not_in_retry_period_dep import NotInRetryPeriodDep from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep from airflow.utils import timezone from airflow.utils.db import provide_session from airflow.utils.decorators import apply_defaults from airflow.utils.helpers import validate_key from airflow.utils.log.logging_mixin import LoggingMixin from airflow.utils.operator_resources import Resources from airflow.utils.trigger_rule import TriggerRule from airflow.utils.weight_rule import WeightRule @functools.total_ordering class BaseOperator(LoggingMixin): """ Abstract base class for all operators. Since operators create objects that become nodes in the dag, BaseOperator contains many recursive methods for dag crawling behavior. To derive this class, you are expected to override the constructor as well as the 'execute' method. Operators derived from this class should perform or trigger certain tasks synchronously (wait for completion). Example of operators could be an operator that runs a Pig job (PigOperator), a sensor operator that waits for a partition to land in Hive (HiveSensorOperator), or one that moves data from Hive to MySQL (Hive2MySqlOperator). Instances of these operators (tasks) target specific operations, running specific scripts, functions or data transfers. This class is abstract and shouldn't be instantiated. Instantiating a class derived from this one results in the creation of a task object, which ultimately becomes a node in DAG objects. Task dependencies should be set by using the set_upstream and/or set_downstream methods. 
:param task_id: a unique, meaningful id for the task :type task_id: str :param owner: the owner of the task, using the unix username is recommended :type owner: str :param retries: the number of retries that should be performed before failing the task :type retries: int :param retry_delay: delay between retries :type retry_delay: datetime.timedelta :param retry_exponential_backoff: allow progressive longer waits between retries by using exponential backoff algorithm on retry delay (delay will be converted into seconds) :type retry_exponential_backoff: bool :param max_retry_delay: maximum delay interval between retries :type max_retry_delay: datetime.timedelta :param start_date: The ``start_date`` for the task, determines the ``execution_date`` for the first task instance. The best practice is to have the start_date rounded to your DAG's ``schedule_interval``. Daily jobs have their start_date some day at 00:00:00, hourly jobs have their start_date at 00:00 of a specific hour. Note that Airflow simply looks at the latest ``execution_date`` and adds the ``schedule_interval`` to determine the next ``execution_date``. It is also very important to note that different tasks' dependencies need to line up in time. If task A depends on task B and their start_date are offset in a way that their execution_date don't line up, A's dependencies will never be met. If you are looking to delay a task, for example running a daily task at 2AM, look into the ``TimeSensor`` and ``TimeDeltaSensor``. We advise against using dynamic ``start_date`` and recommend using fixed ones. Read the FAQ entry about start_date for more information. :type start_date: datetime.datetime :param end_date: if specified, the scheduler won't go beyond this date :type end_date: datetime.datetime :param depends_on_past: when set to true, task instances will run sequentially while relying on the previous task's schedule to succeed. The task instance for the start_date is allowed to run. :type depends_on_past: bool :param wait_for_downstream: when set to true, an instance of task X will wait for tasks immediately downstream of the previous instance of task X to finish successfully before it runs. This is useful if the different instances of a task X alter the same asset, and this asset is used by tasks downstream of task X. Note that depends_on_past is forced to True wherever wait_for_downstream is used. :type wait_for_downstream: bool :param queue: which queue to target when running this job. Not all executors implement queue management, the CeleryExecutor does support targeting specific queues. :type queue: str :param dag: a reference to the dag the task is attached to (if any) :type dag: airflow.models.DAG :param priority_weight: priority weight of this task against other task. This allows the executor to trigger higher priority tasks before others when things get backed up. Set priority_weight as a higher number for more important tasks. :type priority_weight: int :param weight_rule: weighting method used for the effective total priority weight of the task. Options are: ``{ downstream | upstream | absolute }`` default is ``downstream`` When set to ``downstream`` the effective weight of the task is the aggregate sum of all downstream descendants. As a result, upstream tasks will have higher weight and will be scheduled more aggressively when using positive weight values. 
This is useful when you have multiple dag run instances and desire to have all upstream tasks to complete for all runs before each dag can continue processing downstream tasks. When set to ``upstream`` the effective weight is the aggregate sum of all upstream ancestors. This is the opposite where downtream tasks have higher weight and will be scheduled more aggressively when using positive weight values. This is useful when you have multiple dag run instances and prefer to have each dag complete before starting upstream tasks of other dags. When set to ``absolute``, the effective weight is the exact ``priority_weight`` specified without additional weighting. You may want to do this when you know exactly what priority weight each task should have. Additionally, when set to ``absolute``, there is bonus effect of significantly speeding up the task creation process as for very large DAGS. Options can be set as string or using the constants defined in the static class ``airflow.utils.WeightRule`` :type weight_rule: str :param pool: the slot pool this task should run in, slot pools are a way to limit concurrency for certain tasks :type pool: str :param sla: time by which the job is expected to succeed. Note that this represents the ``timedelta`` after the period is closed. For example if you set an SLA of 1 hour, the scheduler would send an email soon after 1:00AM on the ``2016-01-02`` if the ``2016-01-01`` instance has not succeeded yet. The scheduler pays special attention for jobs with an SLA and sends alert emails for sla misses. SLA misses are also recorded in the database for future reference. All tasks that share the same SLA time get bundled in a single email, sent soon after that time. SLA notification are sent once and only once for each task instance. :type sla: datetime.timedelta :param execution_timeout: max time allowed for the execution of this task instance, if it goes beyond it will raise and fail. :type execution_timeout: datetime.timedelta :param on_failure_callback: a function to be called when a task instance of this task fails. a context dictionary is passed as a single parameter to this function. Context contains references to related objects to the task instance and is documented under the macros section of the API. :type on_failure_callback: callable :param on_retry_callback: much like the ``on_failure_callback`` except that it is executed when retries occur. :type on_retry_callback: callable :param on_success_callback: much like the ``on_failure_callback`` except that it is executed when the task succeeds. :type on_success_callback: callable :param trigger_rule: defines the rule by which dependencies are applied for the task to get triggered. Options are: ``{ all_success | all_failed | all_done | one_success | one_failed | none_failed | none_skipped | dummy}`` default is ``all_success``. Options can be set as string or using the constants defined in the static class ``airflow.utils.TriggerRule`` :type trigger_rule: str :param resources: A map of resource parameter names (the argument names of the Resources constructor) to their values. :type resources: dict :param run_as_user: unix username to impersonate while running the task :type run_as_user: str :param task_concurrency: When set, a task will be able to limit the concurrent runs across execution_dates :type task_concurrency: int :param executor_config: Additional task-level configuration parameters that are interpreted by a specific executor. Parameters are namespaced by the name of executor. 
**Example**: to run this task in a specific docker container through the KubernetesExecutor :: MyOperator(..., executor_config={ "KubernetesExecutor": {"image": "myCustomDockerImage"} } ) :type executor_config: dict :param do_xcom_push: if True, an XCom is pushed containing the Operator's result :type do_xcom_push: bool """ # For derived classes to define which fields will get jinjaified template_fields = [] # type: Iterable[str] # Defines which files extensions to look for in the templated fields template_ext = [] # type: Iterable[str] # Defines the color in the UI ui_color = '#fff' ui_fgcolor = '#000' # base list which includes all the attrs that don't need deep copy. _base_operator_shallow_copy_attrs = ('user_defined_macros', 'user_defined_filters', 'params', '_log',) # each operator should override this class attr for shallow copy attrs. shallow_copy_attrs = () # type: Iterable[str] # Defines the operator level extra links operator_extra_links = () # type: Iterable[BaseOperatorLink] @apply_defaults def __init__( self, task_id: str, owner: str = configuration.conf.get('operators', 'DEFAULT_OWNER'), email: Optional[str] = None, email_on_retry: bool = True, email_on_failure: bool = True, retries: int = 0, retry_delay: timedelta = timedelta(seconds=300), retry_exponential_backoff: bool = False, max_retry_delay: Optional[datetime] = None, start_date: Optional[datetime] = None, end_date: Optional[datetime] = None, schedule_interval=None, # not hooked as of now depends_on_past: bool = False, wait_for_downstream: bool = False, dag: Optional[DAG] = None, params: Optional[Dict] = None, default_args: Optional[Dict] = None, priority_weight: int = 1, weight_rule: str = WeightRule.DOWNSTREAM, queue: str = configuration.conf.get('celery', 'default_queue'), pool: Optional[str] = None, sla: Optional[timedelta] = None, execution_timeout: Optional[timedelta] = None, on_failure_callback: Optional[Callable] = None, on_success_callback: Optional[Callable] = None, on_retry_callback: Optional[Callable] = None, trigger_rule: str = TriggerRule.ALL_SUCCESS, resources: Optional[Dict] = None, run_as_user: Optional[str] = None, task_concurrency: Optional[int] = None, executor_config: Optional[Dict] = None, do_xcom_push: bool = True, inlets: Optional[Dict] = None, outlets: Optional[Dict] = None, *args, **kwargs ): if args or kwargs: # TODO remove *args and **kwargs in Airflow 2.0 warnings.warn( 'Invalid arguments were passed to {c} (task_id: {t}). ' 'Support for passing such arguments will be dropped in ' 'Airflow 2.0. Invalid arguments were:' '\n*args: {a}\n**kwargs: {k}'.format( c=self.__class__.__name__, a=args, k=kwargs, t=task_id), category=PendingDeprecationWarning, stacklevel=3 ) validate_key(task_id) self.task_id = task_id self.owner = owner self.email = email self.email_on_retry = email_on_retry self.email_on_failure = email_on_failure self.start_date = start_date if start_date and not isinstance(start_date, datetime): self.log.warning("start_date for %s isn't datetime.datetime", self) elif start_date: self.start_date = timezone.convert_to_utc(start_date) self.end_date = end_date if end_date: self.end_date = timezone.convert_to_utc(end_date) if not TriggerRule.is_valid(trigger_rule): raise AirflowException( "The trigger_rule must be one of {all_triggers}," "'{d}.{t}'; received '{tr}'." 
.format(all_triggers=TriggerRule.all_triggers(), d=dag.dag_id if dag else "", t=task_id, tr=trigger_rule)) self.trigger_rule = trigger_rule self.depends_on_past = depends_on_past self.wait_for_downstream = wait_for_downstream if wait_for_downstream: self.depends_on_past = True if schedule_interval: self.log.warning( "schedule_interval is used for %s, though it has " "been deprecated as a task parameter, you need to " "specify it as a DAG parameter instead", self ) self._schedule_interval = schedule_interval self.retries = retries self.queue = queue self.pool = pool self.sla = sla self.execution_timeout = execution_timeout self.on_failure_callback = on_failure_callback self.on_success_callback = on_success_callback self.on_retry_callback = on_retry_callback if isinstance(retry_delay, timedelta): self.retry_delay = retry_delay else: self.log.debug("Retry_delay isn't timedelta object, assuming secs") self.retry_delay = timedelta(seconds=retry_delay) self.retry_exponential_backoff = retry_exponential_backoff self.max_retry_delay = max_retry_delay self.params = params or {} # Available in templates! self.priority_weight = priority_weight if not WeightRule.is_valid(weight_rule): raise AirflowException( "The weight_rule must be one of {all_weight_rules}," "'{d}.{t}'; received '{tr}'." .format(all_weight_rules=WeightRule.all_weight_rules, d=dag.dag_id if dag else "", t=task_id, tr=weight_rule)) self.weight_rule = weight_rule self.resources = Resources(**(resources or {})) self.run_as_user = run_as_user self.task_concurrency = task_concurrency self.executor_config = executor_config or {} self.do_xcom_push = do_xcom_push # Private attributes self._upstream_task_ids = set() # type: Set[str] self._downstream_task_ids = set() # type: Set[str] if not dag and settings.CONTEXT_MANAGER_DAG: dag = settings.CONTEXT_MANAGER_DAG if dag: self.dag = dag self._log = logging.getLogger("airflow.task.operators") # lineage self.inlets = [] # type: List[DataSet] self.outlets = [] # type: List[DataSet] self.lineage_data = None self._inlets = { "auto": False, "task_ids": [], "datasets": [], } self._outlets = { "datasets": [], } # type: Dict if inlets: self._inlets.update(inlets) if outlets: self._outlets.update(outlets) self._comps = { 'task_id', 'dag_id', 'owner', 'email', 'email_on_retry', 'retry_delay', 'retry_exponential_backoff', 'max_retry_delay', 'start_date', 'schedule_interval', 'depends_on_past', 'wait_for_downstream', 'priority_weight', 'sla', 'execution_timeout', 'on_failure_callback', 'on_success_callback', 'on_retry_callback', 'do_xcom_push', } def __eq__(self, other): if (type(self) == type(other) and self.task_id == other.task_id): return all(self.__dict__.get(c, None) == other.__dict__.get(c, None) for c in self._comps) return False def __ne__(self, other): return not self == other def __lt__(self, other): return self.task_id < other.task_id def __hash__(self): hash_components = [type(self)] for c in self._comps: val = getattr(self, c, None) try: hash(val) hash_components.append(val) except TypeError: hash_components.append(repr(val)) return hash(tuple(hash_components)) # Composing Operators ----------------------------------------------- def __rshift__(self, other): """ Implements Self >> Other == self.set_downstream(other) If "Other" is a DAG, the DAG is assigned to the Operator. 
""" if isinstance(other, DAG): # if this dag is already assigned, do nothing # otherwise, do normal dag assignment if not (self.has_dag() and self.dag is other): self.dag = other else: self.set_downstream(other) return other def __lshift__(self, other): """ Implements Self << Other == self.set_upstream(other) If "Other" is a DAG, the DAG is assigned to the Operator. """ if isinstance(other, DAG): # if this dag is already assigned, do nothing # otherwise, do normal dag assignment if not (self.has_dag() and self.dag is other): self.dag = other else: self.set_upstream(other) return other def __rrshift__(self, other): """ Called for [DAG] >> [Operator] because DAGs don't have __rshift__ operators. """ self.__lshift__(other) return self def __rlshift__(self, other): """ Called for [DAG] << [Operator] because DAGs don't have __lshift__ operators. """ self.__rshift__(other) return self # /Composing Operators --------------------------------------------- @property def dag(self): """ Returns the Operator's DAG if set, otherwise raises an error """ if self.has_dag(): return self._dag else: raise AirflowException( 'Operator {} has not been assigned to a DAG yet'.format(self)) @dag.setter def dag(self, dag): """ Operators can be assigned to one DAG, one time. Repeat assignments to that same DAG are ok. """ if not isinstance(dag, DAG): raise TypeError( 'Expected DAG; received {}'.format(dag.__class__.__name__)) elif self.has_dag() and self.dag is not dag: raise AirflowException( "The DAG assigned to {} can not be changed.".format(self)) elif self.task_id not in dag.task_dict: dag.add_task(self) self._dag = dag def has_dag(self): """ Returns True if the Operator has been assigned to a DAG. """ return getattr(self, '_dag', None) is not None @property def dag_id(self): if self.has_dag(): return self.dag.dag_id else: return 'adhoc_' + self.owner @property def deps(self): """ Returns the list of dependencies for the operator. These differ from execution context dependencies in that they are specific to tasks and can be extended/overridden by subclasses. """ return { NotInRetryPeriodDep(), PrevDagrunDep(), TriggerRuleDep(), } @property def schedule_interval(self): """ The schedule interval of the DAG always wins over individual tasks so that tasks within a DAG always line up. The task still needs a schedule_interval as it may not be attached to a DAG. """ if self.has_dag(): return self.dag._schedule_interval else: return self._schedule_interval @property def priority_weight_total(self): if self.weight_rule == WeightRule.ABSOLUTE: return self.priority_weight elif self.weight_rule == WeightRule.DOWNSTREAM: upstream = False elif self.weight_rule == WeightRule.UPSTREAM: upstream = True else: upstream = False return self.priority_weight + sum( map(lambda task_id: self._dag.task_dict[task_id].priority_weight, self.get_flat_relative_ids(upstream=upstream)) ) @cached_property def operator_extra_link_dict(self): return {link.name: link for link in self.operator_extra_links} @cached_property def global_operator_extra_link_dict(self): from airflow.plugins_manager import global_operator_extra_links return {link.name: link for link in global_operator_extra_links} @prepare_lineage def pre_execute(self, context): """ This hook is triggered right before self.execute() is called. """ def execute(self, context): """ This is the main method to derive when creating an operator. Context is the same dictionary used as when rendering jinja templates. Refer to get_template_context for more context. 
""" raise NotImplementedError() @apply_lineage def post_execute(self, context, result=None): """ This hook is triggered right after self.execute() is called. It is passed the execution context and any results returned by the operator. """ def on_kill(self): """ Override this method to cleanup subprocesses when a task instance gets killed. Any use of the threading, subprocess or multiprocessing module within an operator needs to be cleaned up or it will leave ghost processes behind. """ def __deepcopy__(self, memo): """ Hack sorting double chained task lists by task_id to avoid hitting max_depth on deepcopy operations. """ sys.setrecursionlimit(5000) # TODO fix this in a better way cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result shallow_copy = cls.shallow_copy_attrs + cls._base_operator_shallow_copy_attrs for k, v in self.__dict__.items(): if k not in shallow_copy: setattr(result, k, copy.deepcopy(v, memo)) else: setattr(result, k, copy.copy(v)) return result def __getstate__(self): state = dict(self.__dict__) del state['_log'] return state def __setstate__(self, state): self.__dict__ = state self._log = logging.getLogger("airflow.task.operators") def render_template_from_field(self, attr, content, context, jinja_env): """ Renders a template from a field. If the field is a string, it will simply render the string and return the result. If it is a collection or nested set of collections, it will traverse the structure and render all elements in it. If the field has another type, it will return it as it is. """ rt = self.render_template if isinstance(content, six.string_types): result = jinja_env.from_string(content).render(**context) elif isinstance(content, (list, tuple)): result = [rt(attr, e, context) for e in content] elif isinstance(content, dict): result = { k: rt("{}[{}]".format(attr, k), v, context) for k, v in list(content.items())} else: result = content return result def render_template(self, attr, content, context): """ Renders a template either from a file or directly in a field, and returns the rendered result. """ jinja_env = self.get_template_env() exts = self.__class__.template_ext if ( isinstance(content, six.string_types) and any([content.endswith(ext) for ext in exts])): return jinja_env.get_template(content).render(**context) else: return self.render_template_from_field(attr, content, context, jinja_env) def get_template_env(self): return self.dag.get_template_env() \ if hasattr(self, 'dag') \ else jinja2.Environment(cache_size=0) def prepare_template(self): """ Hook that is triggered after the templated fields get replaced by their content. If you need your operator to alter the content of the file before the template is rendered, it should override this method to do so. 
""" def resolve_template_files(self): # Getting the content of files for template_field / template_ext for attr in self.template_fields: content = getattr(self, attr) if content is None: continue elif isinstance(content, six.string_types) and \ any([content.endswith(ext) for ext in self.template_ext]): env = self.get_template_env() try: setattr(self, attr, env.loader.get_source(env, content)[0]) except Exception as e: self.log.exception(e) elif isinstance(content, list): env = self.dag.get_template_env() for i in range(len(content)): if isinstance(content[i], six.string_types) and \ any([content[i].endswith(ext) for ext in self.template_ext]): try: content[i] = env.loader.get_source(env, content[i])[0] except Exception as e: self.log.exception(e) self.prepare_template() @property def upstream_list(self): """@property: list of tasks directly upstream""" return [self.dag.get_task(tid) for tid in self._upstream_task_ids] @property def upstream_task_ids(self): return self._upstream_task_ids @property def downstream_list(self): """@property: list of tasks directly downstream""" return [self.dag.get_task(tid) for tid in self._downstream_task_ids] @property def downstream_task_ids(self): return self._downstream_task_ids @provide_session def clear(self, start_date=None, end_date=None, upstream=False, downstream=False, session=None): """ Clears the state of task instances associated with the task, following the parameters specified. """ TI = TaskInstance qry = session.query(TI).filter(TI.dag_id == self.dag_id) if start_date: qry = qry.filter(TI.execution_date >= start_date) if end_date: qry = qry.filter(TI.execution_date <= end_date) tasks = [self.task_id] if upstream: tasks += [ t.task_id for t in self.get_flat_relatives(upstream=True)] if downstream: tasks += [ t.task_id for t in self.get_flat_relatives(upstream=False)] qry = qry.filter(TI.task_id.in_(tasks)) count = qry.count() clear_task_instances(qry.all(), session, dag=self.dag) session.commit() return count @provide_session def get_task_instances(self, start_date=None, end_date=None, session=None): """ Get a set of task instance related to this task for a specific date range. """ end_date = end_date or timezone.utcnow() return session.query(TaskInstance)\ .filter(TaskInstance.dag_id == self.dag_id)\ .filter(TaskInstance.task_id == self.task_id)\ .filter(TaskInstance.execution_date >= start_date)\ .filter(TaskInstance.execution_date <= end_date)\ .order_by(TaskInstance.execution_date)\ .all() def get_flat_relative_ids(self, upstream=False, found_descendants=None): """ Get a flat list of relatives' ids, either upstream or downstream. """ if not found_descendants: found_descendants = set() relative_ids = self.get_direct_relative_ids(upstream) for relative_id in relative_ids: if relative_id not in found_descendants: found_descendants.add(relative_id) relative_task = self._dag.task_dict[relative_id] relative_task.get_flat_relative_ids(upstream, found_descendants) return found_descendants def get_flat_relatives(self, upstream=False): """ Get a flat list of relatives, either upstream or downstream. """ return list(map(lambda task_id: self._dag.task_dict[task_id], self.get_flat_relative_ids(upstream))) def run( self, start_date=None, end_date=None, ignore_first_depends_on_past=False, ignore_ti_state=False, mark_success=False): """ Run a set of task instances for a date range. 
""" start_date = start_date or self.start_date end_date = end_date or self.end_date or timezone.utcnow() for dt in self.dag.date_range(start_date, end_date=end_date): TaskInstance(self, dt).run( mark_success=mark_success, ignore_depends_on_past=( dt == start_date and ignore_first_depends_on_past), ignore_ti_state=ignore_ti_state) def dry_run(self): self.log.info('Dry run') for attr in self.template_fields: content = getattr(self, attr) if content and isinstance(content, six.string_types): self.log.info('Rendering template for %s', attr) self.log.info(content) def get_direct_relative_ids(self, upstream=False): """ Get the direct relative ids to the current task, upstream or downstream. """ if upstream: return self._upstream_task_ids else: return self._downstream_task_ids def get_direct_relatives(self, upstream=False): """ Get the direct relatives to the current task, upstream or downstream. """ if upstream: return self.upstream_list else: return self.downstream_list def __repr__(self): return "<Task({self.__class__.__name__}): {self.task_id}>".format( self=self) @property def task_type(self): return self.__class__.__name__ def add_only_new(self, item_set, item): if item in item_set: self.log.warning( 'Dependency {self}, {item} already registered' ''.format(self=self, item=item)) else: item_set.add(item) def _set_relatives(self, task_or_task_list, upstream=False): try: task_list = list(task_or_task_list) except TypeError: task_list = [task_or_task_list] for t in task_list: if not isinstance(t, BaseOperator): raise AirflowException( "Relationships can only be set between " "Operators; received {}".format(t.__class__.__name__)) # relationships can only be set if the tasks share a single DAG. Tasks # without a DAG are assigned to that DAG. dags = {t._dag.dag_id: t._dag for t in [self] + task_list if t.has_dag()} if len(dags) > 1: raise AirflowException( 'Tried to set relationships between tasks in ' 'more than one DAG: {}'.format(dags.values())) elif len(dags) == 1: dag = dags.popitem()[1] else: raise AirflowException( "Tried to create relationships between tasks that don't have " "DAGs yet. Set the DAG for at least one " "task and try again: {}".format([self] + task_list)) if dag and not self.has_dag(): self.dag = dag for task in task_list: if dag and not task.has_dag(): task.dag = dag if upstream: task.add_only_new(task.get_direct_relative_ids(upstream=False), self.task_id) self.add_only_new(self._upstream_task_ids, task.task_id) else: self.add_only_new(self._downstream_task_ids, task.task_id) task.add_only_new(task.get_direct_relative_ids(upstream=True), self.task_id) def set_downstream(self, task_or_task_list): """ Set a task or a task list to be directly downstream from the current task. """ self._set_relatives(task_or_task_list, upstream=False) def set_upstream(self, task_or_task_list): """ Set a task or a task list to be directly upstream from the current task. 
""" self._set_relatives(task_or_task_list, upstream=True) def xcom_push( self, context, key, value, execution_date=None): """ See TaskInstance.xcom_push() """ context['ti'].xcom_push( key=key, value=value, execution_date=execution_date) def xcom_pull( self, context, task_ids=None, dag_id=None, key=XCOM_RETURN_KEY, include_prior_dates=None): """ See TaskInstance.xcom_pull() """ return context['ti'].xcom_pull( key=key, task_ids=task_ids, dag_id=dag_id, include_prior_dates=include_prior_dates) @cached_property def extra_links(self) -> Iterable[str]: return list(set(self.operator_extra_link_dict.keys()) .union(self.global_operator_extra_link_dict.keys())) def get_extra_links(self, dttm, link_name): """ For an operator, gets the URL that the external links specified in `extra_links` should point to. :raise ValueError: The error message of a ValueError will be passed on through to the fronted to show up as a tooltip on the disabled link :param dttm: The datetime parsed execution date for the URL being searched for :param link_name: The name of the link we're looking for the URL for. Should be one of the options specified in `extra_links` :return: A URL """ if link_name in self.operator_extra_link_dict: return self.operator_extra_link_dict[link_name].get_link(self, dttm) elif link_name in self.global_operator_extra_link_dict: return self.global_operator_extra_link_dict[link_name].get_link(self, dttm) class BaseOperatorLink(metaclass=ABCMeta): """ Abstract base class that defines how we get an operator link. """ @property @abstractmethod def name(self) -> str: """ Name of the link. This will be the button name on the task UI. :return: link name """ @abstractmethod def get_link(self, operator: BaseOperator, dttm: datetime) -> str: """ Link to external system. :param operator: airflow operator :param dttm: datetime :return: link to external system """
38.765
102
0.629176
from abc import ABCMeta, abstractmethod from cached_property import cached_property import copy import functools import logging import sys import warnings from datetime import timedelta, datetime from typing import Callable, Dict, Iterable, List, Optional, Set import jinja2 import six from airflow import configuration, settings from airflow.exceptions import AirflowException from airflow.lineage import prepare_lineage, apply_lineage, DataSet from airflow.models.dag import DAG from airflow.models.taskinstance import TaskInstance, clear_task_instances from airflow.models.xcom import XCOM_RETURN_KEY from airflow.ti_deps.deps.not_in_retry_period_dep import NotInRetryPeriodDep from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep from airflow.utils import timezone from airflow.utils.db import provide_session from airflow.utils.decorators import apply_defaults from airflow.utils.helpers import validate_key from airflow.utils.log.logging_mixin import LoggingMixin from airflow.utils.operator_resources import Resources from airflow.utils.trigger_rule import TriggerRule from airflow.utils.weight_rule import WeightRule @functools.total_ordering class BaseOperator(LoggingMixin): template_fields = [] template_ext = [] ui_color = '#fff' ui_fgcolor = '#000' _base_operator_shallow_copy_attrs = ('user_defined_macros', 'user_defined_filters', 'params', '_log',) # each operator should override this class attr for shallow copy attrs. shallow_copy_attrs = () # type: Iterable[str] # Defines the operator level extra links operator_extra_links = () # type: Iterable[BaseOperatorLink] @apply_defaults def __init__( self, task_id: str, owner: str = configuration.conf.get('operators', 'DEFAULT_OWNER'), email: Optional[str] = None, email_on_retry: bool = True, email_on_failure: bool = True, retries: int = 0, retry_delay: timedelta = timedelta(seconds=300), retry_exponential_backoff: bool = False, max_retry_delay: Optional[datetime] = None, start_date: Optional[datetime] = None, end_date: Optional[datetime] = None, schedule_interval=None, # not hooked as of now depends_on_past: bool = False, wait_for_downstream: bool = False, dag: Optional[DAG] = None, params: Optional[Dict] = None, default_args: Optional[Dict] = None, priority_weight: int = 1, weight_rule: str = WeightRule.DOWNSTREAM, queue: str = configuration.conf.get('celery', 'default_queue'), pool: Optional[str] = None, sla: Optional[timedelta] = None, execution_timeout: Optional[timedelta] = None, on_failure_callback: Optional[Callable] = None, on_success_callback: Optional[Callable] = None, on_retry_callback: Optional[Callable] = None, trigger_rule: str = TriggerRule.ALL_SUCCESS, resources: Optional[Dict] = None, run_as_user: Optional[str] = None, task_concurrency: Optional[int] = None, executor_config: Optional[Dict] = None, do_xcom_push: bool = True, inlets: Optional[Dict] = None, outlets: Optional[Dict] = None, *args, **kwargs ): if args or kwargs: # TODO remove *args and **kwargs in Airflow 2.0 warnings.warn( 'Invalid arguments were passed to {c} (task_id: {t}). ' 'Support for passing such arguments will be dropped in ' 'Airflow 2.0. 
Invalid arguments were:' '\n*args: {a}\n**kwargs: {k}'.format( c=self.__class__.__name__, a=args, k=kwargs, t=task_id), category=PendingDeprecationWarning, stacklevel=3 ) validate_key(task_id) self.task_id = task_id self.owner = owner self.email = email self.email_on_retry = email_on_retry self.email_on_failure = email_on_failure self.start_date = start_date if start_date and not isinstance(start_date, datetime): self.log.warning("start_date for %s isn't datetime.datetime", self) elif start_date: self.start_date = timezone.convert_to_utc(start_date) self.end_date = end_date if end_date: self.end_date = timezone.convert_to_utc(end_date) if not TriggerRule.is_valid(trigger_rule): raise AirflowException( "The trigger_rule must be one of {all_triggers}," "'{d}.{t}'; received '{tr}'." .format(all_triggers=TriggerRule.all_triggers(), d=dag.dag_id if dag else "", t=task_id, tr=trigger_rule)) self.trigger_rule = trigger_rule self.depends_on_past = depends_on_past self.wait_for_downstream = wait_for_downstream if wait_for_downstream: self.depends_on_past = True if schedule_interval: self.log.warning( "schedule_interval is used for %s, though it has " "been deprecated as a task parameter, you need to " "specify it as a DAG parameter instead", self ) self._schedule_interval = schedule_interval self.retries = retries self.queue = queue self.pool = pool self.sla = sla self.execution_timeout = execution_timeout self.on_failure_callback = on_failure_callback self.on_success_callback = on_success_callback self.on_retry_callback = on_retry_callback if isinstance(retry_delay, timedelta): self.retry_delay = retry_delay else: self.log.debug("Retry_delay isn't timedelta object, assuming secs") self.retry_delay = timedelta(seconds=retry_delay) self.retry_exponential_backoff = retry_exponential_backoff self.max_retry_delay = max_retry_delay self.params = params or {} # Available in templates! self.priority_weight = priority_weight if not WeightRule.is_valid(weight_rule): raise AirflowException( "The weight_rule must be one of {all_weight_rules}," "'{d}.{t}'; received '{tr}'." 
.format(all_weight_rules=WeightRule.all_weight_rules, d=dag.dag_id if dag else "", t=task_id, tr=weight_rule)) self.weight_rule = weight_rule self.resources = Resources(**(resources or {})) self.run_as_user = run_as_user self.task_concurrency = task_concurrency self.executor_config = executor_config or {} self.do_xcom_push = do_xcom_push # Private attributes self._upstream_task_ids = set() # type: Set[str] self._downstream_task_ids = set() # type: Set[str] if not dag and settings.CONTEXT_MANAGER_DAG: dag = settings.CONTEXT_MANAGER_DAG if dag: self.dag = dag self._log = logging.getLogger("airflow.task.operators") # lineage self.inlets = [] # type: List[DataSet] self.outlets = [] # type: List[DataSet] self.lineage_data = None self._inlets = { "auto": False, "task_ids": [], "datasets": [], } self._outlets = { "datasets": [], } # type: Dict if inlets: self._inlets.update(inlets) if outlets: self._outlets.update(outlets) self._comps = { 'task_id', 'dag_id', 'owner', 'email', 'email_on_retry', 'retry_delay', 'retry_exponential_backoff', 'max_retry_delay', 'start_date', 'schedule_interval', 'depends_on_past', 'wait_for_downstream', 'priority_weight', 'sla', 'execution_timeout', 'on_failure_callback', 'on_success_callback', 'on_retry_callback', 'do_xcom_push', } def __eq__(self, other): if (type(self) == type(other) and self.task_id == other.task_id): return all(self.__dict__.get(c, None) == other.__dict__.get(c, None) for c in self._comps) return False def __ne__(self, other): return not self == other def __lt__(self, other): return self.task_id < other.task_id def __hash__(self): hash_components = [type(self)] for c in self._comps: val = getattr(self, c, None) try: hash(val) hash_components.append(val) except TypeError: hash_components.append(repr(val)) return hash(tuple(hash_components)) # Composing Operators ----------------------------------------------- def __rshift__(self, other): if isinstance(other, DAG): # if this dag is already assigned, do nothing # otherwise, do normal dag assignment if not (self.has_dag() and self.dag is other): self.dag = other else: self.set_downstream(other) return other def __lshift__(self, other): if isinstance(other, DAG): # if this dag is already assigned, do nothing # otherwise, do normal dag assignment if not (self.has_dag() and self.dag is other): self.dag = other else: self.set_upstream(other) return other def __rrshift__(self, other): self.__lshift__(other) return self def __rlshift__(self, other): self.__rshift__(other) return self # /Composing Operators --------------------------------------------- @property def dag(self): if self.has_dag(): return self._dag else: raise AirflowException( 'Operator {} has not been assigned to a DAG yet'.format(self)) @dag.setter def dag(self, dag): if not isinstance(dag, DAG): raise TypeError( 'Expected DAG; received {}'.format(dag.__class__.__name__)) elif self.has_dag() and self.dag is not dag: raise AirflowException( "The DAG assigned to {} can not be changed.".format(self)) elif self.task_id not in dag.task_dict: dag.add_task(self) self._dag = dag def has_dag(self): return getattr(self, '_dag', None) is not None @property def dag_id(self): if self.has_dag(): return self.dag.dag_id else: return 'adhoc_' + self.owner @property def deps(self): return { NotInRetryPeriodDep(), PrevDagrunDep(), TriggerRuleDep(), } @property def schedule_interval(self): if self.has_dag(): return self.dag._schedule_interval else: return self._schedule_interval @property def priority_weight_total(self): if self.weight_rule == 
WeightRule.ABSOLUTE: return self.priority_weight elif self.weight_rule == WeightRule.DOWNSTREAM: upstream = False elif self.weight_rule == WeightRule.UPSTREAM: upstream = True else: upstream = False return self.priority_weight + sum( map(lambda task_id: self._dag.task_dict[task_id].priority_weight, self.get_flat_relative_ids(upstream=upstream)) ) @cached_property def operator_extra_link_dict(self): return {link.name: link for link in self.operator_extra_links} @cached_property def global_operator_extra_link_dict(self): from airflow.plugins_manager import global_operator_extra_links return {link.name: link for link in global_operator_extra_links} @prepare_lineage def pre_execute(self, context): def execute(self, context): raise NotImplementedError() @apply_lineage def post_execute(self, context, result=None): def on_kill(self): def __deepcopy__(self, memo): sys.setrecursionlimit(5000) # TODO fix this in a better way cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result shallow_copy = cls.shallow_copy_attrs + cls._base_operator_shallow_copy_attrs for k, v in self.__dict__.items(): if k not in shallow_copy: setattr(result, k, copy.deepcopy(v, memo)) else: setattr(result, k, copy.copy(v)) return result def __getstate__(self): state = dict(self.__dict__) del state['_log'] return state def __setstate__(self, state): self.__dict__ = state self._log = logging.getLogger("airflow.task.operators") def render_template_from_field(self, attr, content, context, jinja_env): rt = self.render_template if isinstance(content, six.string_types): result = jinja_env.from_string(content).render(**context) elif isinstance(content, (list, tuple)): result = [rt(attr, e, context) for e in content] elif isinstance(content, dict): result = { k: rt("{}[{}]".format(attr, k), v, context) for k, v in list(content.items())} else: result = content return result def render_template(self, attr, content, context): jinja_env = self.get_template_env() exts = self.__class__.template_ext if ( isinstance(content, six.string_types) and any([content.endswith(ext) for ext in exts])): return jinja_env.get_template(content).render(**context) else: return self.render_template_from_field(attr, content, context, jinja_env) def get_template_env(self): return self.dag.get_template_env() \ if hasattr(self, 'dag') \ else jinja2.Environment(cache_size=0) def prepare_template(self): def resolve_template_files(self): # Getting the content of files for template_field / template_ext for attr in self.template_fields: content = getattr(self, attr) if content is None: continue elif isinstance(content, six.string_types) and \ any([content.endswith(ext) for ext in self.template_ext]): env = self.get_template_env() try: setattr(self, attr, env.loader.get_source(env, content)[0]) except Exception as e: self.log.exception(e) elif isinstance(content, list): env = self.dag.get_template_env() for i in range(len(content)): if isinstance(content[i], six.string_types) and \ any([content[i].endswith(ext) for ext in self.template_ext]): try: content[i] = env.loader.get_source(env, content[i])[0] except Exception as e: self.log.exception(e) self.prepare_template() @property def upstream_list(self): return [self.dag.get_task(tid) for tid in self._upstream_task_ids] @property def upstream_task_ids(self): return self._upstream_task_ids @property def downstream_list(self): return [self.dag.get_task(tid) for tid in self._downstream_task_ids] @property def downstream_task_ids(self): return self._downstream_task_ids @provide_session def clear(self, 
start_date=None, end_date=None, upstream=False, downstream=False, session=None): TI = TaskInstance qry = session.query(TI).filter(TI.dag_id == self.dag_id) if start_date: qry = qry.filter(TI.execution_date >= start_date) if end_date: qry = qry.filter(TI.execution_date <= end_date) tasks = [self.task_id] if upstream: tasks += [ t.task_id for t in self.get_flat_relatives(upstream=True)] if downstream: tasks += [ t.task_id for t in self.get_flat_relatives(upstream=False)] qry = qry.filter(TI.task_id.in_(tasks)) count = qry.count() clear_task_instances(qry.all(), session, dag=self.dag) session.commit() return count @provide_session def get_task_instances(self, start_date=None, end_date=None, session=None): end_date = end_date or timezone.utcnow() return session.query(TaskInstance)\ .filter(TaskInstance.dag_id == self.dag_id)\ .filter(TaskInstance.task_id == self.task_id)\ .filter(TaskInstance.execution_date >= start_date)\ .filter(TaskInstance.execution_date <= end_date)\ .order_by(TaskInstance.execution_date)\ .all() def get_flat_relative_ids(self, upstream=False, found_descendants=None): if not found_descendants: found_descendants = set() relative_ids = self.get_direct_relative_ids(upstream) for relative_id in relative_ids: if relative_id not in found_descendants: found_descendants.add(relative_id) relative_task = self._dag.task_dict[relative_id] relative_task.get_flat_relative_ids(upstream, found_descendants) return found_descendants def get_flat_relatives(self, upstream=False): return list(map(lambda task_id: self._dag.task_dict[task_id], self.get_flat_relative_ids(upstream))) def run( self, start_date=None, end_date=None, ignore_first_depends_on_past=False, ignore_ti_state=False, mark_success=False): start_date = start_date or self.start_date end_date = end_date or self.end_date or timezone.utcnow() for dt in self.dag.date_range(start_date, end_date=end_date): TaskInstance(self, dt).run( mark_success=mark_success, ignore_depends_on_past=( dt == start_date and ignore_first_depends_on_past), ignore_ti_state=ignore_ti_state) def dry_run(self): self.log.info('Dry run') for attr in self.template_fields: content = getattr(self, attr) if content and isinstance(content, six.string_types): self.log.info('Rendering template for %s', attr) self.log.info(content) def get_direct_relative_ids(self, upstream=False): if upstream: return self._upstream_task_ids else: return self._downstream_task_ids def get_direct_relatives(self, upstream=False): if upstream: return self.upstream_list else: return self.downstream_list def __repr__(self): return "<Task({self.__class__.__name__}): {self.task_id}>".format( self=self) @property def task_type(self): return self.__class__.__name__ def add_only_new(self, item_set, item): if item in item_set: self.log.warning( 'Dependency {self}, {item} already registered' ''.format(self=self, item=item)) else: item_set.add(item) def _set_relatives(self, task_or_task_list, upstream=False): try: task_list = list(task_or_task_list) except TypeError: task_list = [task_or_task_list] for t in task_list: if not isinstance(t, BaseOperator): raise AirflowException( "Relationships can only be set between " "Operators; received {}".format(t.__class__.__name__)) # relationships can only be set if the tasks share a single DAG. Tasks # without a DAG are assigned to that DAG. 
dags = {t._dag.dag_id: t._dag for t in [self] + task_list if t.has_dag()} if len(dags) > 1: raise AirflowException( 'Tried to set relationships between tasks in ' 'more than one DAG: {}'.format(dags.values())) elif len(dags) == 1: dag = dags.popitem()[1] else: raise AirflowException( "Tried to create relationships between tasks that don't have " "DAGs yet. Set the DAG for at least one " "task and try again: {}".format([self] + task_list)) if dag and not self.has_dag(): self.dag = dag for task in task_list: if dag and not task.has_dag(): task.dag = dag if upstream: task.add_only_new(task.get_direct_relative_ids(upstream=False), self.task_id) self.add_only_new(self._upstream_task_ids, task.task_id) else: self.add_only_new(self._downstream_task_ids, task.task_id) task.add_only_new(task.get_direct_relative_ids(upstream=True), self.task_id) def set_downstream(self, task_or_task_list): self._set_relatives(task_or_task_list, upstream=False) def set_upstream(self, task_or_task_list): self._set_relatives(task_or_task_list, upstream=True) def xcom_push( self, context, key, value, execution_date=None): context['ti'].xcom_push( key=key, value=value, execution_date=execution_date) def xcom_pull( self, context, task_ids=None, dag_id=None, key=XCOM_RETURN_KEY, include_prior_dates=None): return context['ti'].xcom_pull( key=key, task_ids=task_ids, dag_id=dag_id, include_prior_dates=include_prior_dates) @cached_property def extra_links(self) -> Iterable[str]: return list(set(self.operator_extra_link_dict.keys()) .union(self.global_operator_extra_link_dict.keys())) def get_extra_links(self, dttm, link_name): if link_name in self.operator_extra_link_dict: return self.operator_extra_link_dict[link_name].get_link(self, dttm) elif link_name in self.global_operator_extra_link_dict: return self.global_operator_extra_link_dict[link_name].get_link(self, dttm) class BaseOperatorLink(metaclass=ABCMeta): @property @abstractmethod def name(self) -> str: @abstractmethod def get_link(self, operator: BaseOperator, dttm: datetime) -> str:
true
true
f72484a7592ee8ca18c8b0897a938b18606428a4
6,800
py
Python
bindings/python/ensmallen_graph/datasets/string/paraprevotellaxylaniphila.py
caufieldjh/ensmallen_graph
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
[ "MIT" ]
null
null
null
bindings/python/ensmallen_graph/datasets/string/paraprevotellaxylaniphila.py
caufieldjh/ensmallen_graph
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
[ "MIT" ]
null
null
null
bindings/python/ensmallen_graph/datasets/string/paraprevotellaxylaniphila.py
caufieldjh/ensmallen_graph
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
[ "MIT" ]
null
null
null
""" This file offers the methods to automatically retrieve the graph Paraprevotella xylaniphila. The graph is automatically retrieved from the STRING repository. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 21:42:47.483688 The undirected graph Paraprevotella xylaniphila has 3396 nodes and 309111 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.05362 and has 22 connected components, where the component with most nodes has 3350 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 157, the mean node degree is 182.04, and the node degree mode is 4. The top 5 most central nodes are 762982.HMPREF9442_02244 (degree 997), 762982.HMPREF9442_00670 (degree 895), 762982.HMPREF9442_01031 (degree 865), 762982.HMPREF9442_03225 (degree 794) and 762982.HMPREF9442_00174 (degree 776). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import ParaprevotellaXylaniphila # Then load the graph graph = ParaprevotellaXylaniphila() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks. """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error def ParaprevotellaXylaniphila( directed: bool = False, verbose: int = 2, cache_path: str = "graphs/string", **additional_graph_kwargs: Dict ) -> EnsmallenGraph: """Return new instance of the Paraprevotella xylaniphila graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of Paraprevotella xylaniphila graph. 
Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 21:42:47.483688 The undirected graph Paraprevotella xylaniphila has 3396 nodes and 309111 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.05362 and has 22 connected components, where the component with most nodes has 3350 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 157, the mean node degree is 182.04, and the node degree mode is 4. The top 5 most central nodes are 762982.HMPREF9442_02244 (degree 997), 762982.HMPREF9442_00670 (degree 895), 762982.HMPREF9442_01031 (degree 865), 762982.HMPREF9442_03225 (degree 794) and 762982.HMPREF9442_00174 (degree 776). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import ParaprevotellaXylaniphila # Then load the graph graph = ParaprevotellaXylaniphila() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks. """ return AutomaticallyRetrievedGraph( graph_name="ParaprevotellaXylaniphila", dataset="string", directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
35.602094
223
0.708529
from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen_graph import EnsmallenGraph def ParaprevotellaXylaniphila( directed: bool = False, verbose: int = 2, cache_path: str = "graphs/string", **additional_graph_kwargs: Dict ) -> EnsmallenGraph: return AutomaticallyRetrievedGraph( graph_name="ParaprevotellaXylaniphila", dataset="string", directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
true
true
f72484fc095b2c9a1f53f18824e2c7709dcae682
874
py
Python
spec_parser/spec_parser/util.py
Parnassius/domify
262a9b9bf68fd627d963f23abb68c06f424180f2
[ "MIT" ]
null
null
null
spec_parser/spec_parser/util.py
Parnassius/domify
262a9b9bf68fd627d963f23abb68c06f424180f2
[ "MIT" ]
5
2022-03-01T19:53:28.000Z
2022-03-28T01:32:30.000Z
spec_parser/spec_parser/util.py
Parnassius/domify
262a9b9bf68fd627d963f23abb68c06f424180f2
[ "MIT" ]
1
2021-11-08T20:36:36.000Z
2021-11-08T20:36:36.000Z
from typing import Dict, List import requests from bs4 import BeautifulSoup # type: ignore[import] class _RequestCache: def __init__(self) -> None: self._cache: Dict[str, BeautifulSoup] = {} def __call__(self, page: str) -> BeautifulSoup: if page.endswith(".html"): page = page[:-5] if page not in self._cache: html = requests.get( f"https://html.spec.whatwg.org/multipage/{page}.html" ).text self._cache[page] = BeautifulSoup(html, "html5lib") return self._cache[page] request_cache = _RequestCache() def get_input_type_keywords() -> List[str]: soup = request_cache("input") table = soup.find(id="attr-input-type-keywords") keywords = [ row.contents[0].find("code").string for row in table.find("tbody").children ] return keywords
26.484848
83
0.621281
from typing import Dict, List import requests from bs4 import BeautifulSoup class _RequestCache: def __init__(self) -> None: self._cache: Dict[str, BeautifulSoup] = {} def __call__(self, page: str) -> BeautifulSoup: if page.endswith(".html"): page = page[:-5] if page not in self._cache: html = requests.get( f"https://html.spec.whatwg.org/multipage/{page}.html" ).text self._cache[page] = BeautifulSoup(html, "html5lib") return self._cache[page] request_cache = _RequestCache() def get_input_type_keywords() -> List[str]: soup = request_cache("input") table = soup.find(id="attr-input-type-keywords") keywords = [ row.contents[0].find("code").string for row in table.find("tbody").children ] return keywords
true
true
f72485a6ca32241a57f395404a0c19eded7aae2d
415
py
Python
Data_Structures/Maps_Hash_Dict/DivisiblePairCount2.py
neha07kumari/algo_ds_101
b5f87feb4aac5ad45d934a609e7e73eedf280f10
[ "MIT" ]
1
2022-02-11T19:25:01.000Z
2022-02-11T19:25:01.000Z
Data_Structures/Maps_Hash_Dict/DivisiblePairCount2.py
neha07kumari/algo_ds_101
b5f87feb4aac5ad45d934a609e7e73eedf280f10
[ "MIT" ]
2
2020-10-13T06:49:54.000Z
2020-10-17T07:16:37.000Z
Data_Structures/Maps_Hash_Dict/DivisiblePairCount2.py
neha07kumari/algo_ds_101
b5f87feb4aac5ad45d934a609e7e73eedf280f10
[ "MIT" ]
14
2020-10-13T04:20:57.000Z
2021-10-01T16:16:13.000Z
def DivisiblePairCount(arr) : count = 0 k = len(arr) for i in range(0, k): for j in range(i+1, k): if (arr[i] % arr[j] == 0 or arr[j] % arr[i] == 0): count += 1 return count if __name__ == "__main__": #give input in form of a list -- [1,2,3] arr = [int(item) for item in ''.join(list(input())[1:-1]).split(',')] print(DivisiblePairCount(arr))
24.411765
73
0.508434
def DivisiblePairCount(arr) : count = 0 k = len(arr) for i in range(0, k): for j in range(i+1, k): if (arr[i] % arr[j] == 0 or arr[j] % arr[i] == 0): count += 1 return count if __name__ == "__main__": arr = [int(item) for item in ''.join(list(input())[1:-1]).split(',')] print(DivisiblePairCount(arr))
true
true
f72485e95740971bc6b7f5bd9e29a91909acdc48
92
py
Python
project_mysql/sales/modelsa.py
righ/djangomodel2alchemymap
c156cd14ff7bfd7d858449819072c18059ecdcd0
[ "MIT" ]
17
2019-08-20T16:58:18.000Z
2022-01-15T05:00:52.000Z
project_mysql/sales/modelsa.py
righ/djangomodel2alchemymap
c156cd14ff7bfd7d858449819072c18059ecdcd0
[ "MIT" ]
4
2020-06-02T00:14:38.000Z
2021-10-14T16:45:13.000Z
project_mysql/sales/modelsa.py
righ/djangomodel2alchemymap
c156cd14ff7bfd7d858449819072c18059ecdcd0
[ "MIT" ]
2
2019-12-17T13:15:48.000Z
2021-04-27T09:09:46.000Z
from d2a import transfer from . import models transfer(models, globals(), db_type='mysql')
18.4
44
0.76087
from d2a import transfer from . import models transfer(models, globals(), db_type='mysql')
true
true
f724861c26fdb7becc18c6a8a70a39ab6cf71c08
6,782
py
Python
sdk/lusid/models/resource_list_of_get_counterparty_response.py
mneedham/lusid-sdk-python-preview
f4494009d1a2f3431d931c813cab679bdbd92c84
[ "MIT" ]
null
null
null
sdk/lusid/models/resource_list_of_get_counterparty_response.py
mneedham/lusid-sdk-python-preview
f4494009d1a2f3431d931c813cab679bdbd92c84
[ "MIT" ]
null
null
null
sdk/lusid/models/resource_list_of_get_counterparty_response.py
mneedham/lusid-sdk-python-preview
f4494009d1a2f3431d931c813cab679bdbd92c84
[ "MIT" ]
null
null
null
# coding: utf-8 """ LUSID API FINBOURNE Technology # noqa: E501 The version of the OpenAPI document: 0.11.3192 Contact: info@finbourne.com Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six class ResourceListOfGetCounterpartyResponse(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. required_map (dict): The key is attribute name and the value is whether it is 'required' or 'optional'. """ openapi_types = { 'values': 'list[GetCounterpartyResponse]', 'href': 'str', 'links': 'list[Link]', 'next_page': 'str', 'previous_page': 'str' } attribute_map = { 'values': 'values', 'href': 'href', 'links': 'links', 'next_page': 'nextPage', 'previous_page': 'previousPage' } required_map = { 'values': 'required', 'href': 'optional', 'links': 'optional', 'next_page': 'optional', 'previous_page': 'optional' } def __init__(self, values=None, href=None, links=None, next_page=None, previous_page=None): # noqa: E501 """ ResourceListOfGetCounterpartyResponse - a model defined in OpenAPI :param values: (required) :type values: list[lusid.GetCounterpartyResponse] :param href: :type href: str :param links: :type links: list[lusid.Link] :param next_page: :type next_page: str :param previous_page: :type previous_page: str """ # noqa: E501 self._values = None self._href = None self._links = None self._next_page = None self._previous_page = None self.discriminator = None self.values = values self.href = href self.links = links self.next_page = next_page self.previous_page = previous_page @property def values(self): """Gets the values of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :return: The values of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :rtype: list[GetCounterpartyResponse] """ return self._values @values.setter def values(self, values): """Sets the values of this ResourceListOfGetCounterpartyResponse. :param values: The values of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :type: list[GetCounterpartyResponse] """ if values is None: raise ValueError("Invalid value for `values`, must not be `None`") # noqa: E501 self._values = values @property def href(self): """Gets the href of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :return: The href of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :rtype: str """ return self._href @href.setter def href(self, href): """Sets the href of this ResourceListOfGetCounterpartyResponse. :param href: The href of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :type: str """ self._href = href @property def links(self): """Gets the links of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :return: The links of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :rtype: list[Link] """ return self._links @links.setter def links(self, links): """Sets the links of this ResourceListOfGetCounterpartyResponse. :param links: The links of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :type: list[Link] """ self._links = links @property def next_page(self): """Gets the next_page of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :return: The next_page of this ResourceListOfGetCounterpartyResponse. 
# noqa: E501 :rtype: str """ return self._next_page @next_page.setter def next_page(self, next_page): """Sets the next_page of this ResourceListOfGetCounterpartyResponse. :param next_page: The next_page of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :type: str """ self._next_page = next_page @property def previous_page(self): """Gets the previous_page of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :return: The previous_page of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :rtype: str """ return self._previous_page @previous_page.setter def previous_page(self, previous_page): """Sets the previous_page of this ResourceListOfGetCounterpartyResponse. :param previous_page: The previous_page of this ResourceListOfGetCounterpartyResponse. # noqa: E501 :type: str """ self._previous_page = previous_page def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResourceListOfGetCounterpartyResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
28.495798
109
0.599823
import pprint import re import six class ResourceListOfGetCounterpartyResponse(object): openapi_types = { 'values': 'list[GetCounterpartyResponse]', 'href': 'str', 'links': 'list[Link]', 'next_page': 'str', 'previous_page': 'str' } attribute_map = { 'values': 'values', 'href': 'href', 'links': 'links', 'next_page': 'nextPage', 'previous_page': 'previousPage' } required_map = { 'values': 'required', 'href': 'optional', 'links': 'optional', 'next_page': 'optional', 'previous_page': 'optional' } def __init__(self, values=None, href=None, links=None, next_page=None, previous_page=None): self._values = None self._href = None self._links = None self._next_page = None self._previous_page = None self.discriminator = None self.values = values self.href = href self.links = links self.next_page = next_page self.previous_page = previous_page @property def values(self): return self._values @values.setter def values(self, values): if values is None: raise ValueError("Invalid value for `values`, must not be `None`") self._values = values @property def href(self): return self._href @href.setter def href(self, href): self._href = href @property def links(self): return self._links @links.setter def links(self, links): self._links = links @property def next_page(self): return self._next_page @next_page.setter def next_page(self, next_page): self._next_page = next_page @property def previous_page(self): return self._previous_page @previous_page.setter def previous_page(self, previous_page): self._previous_page = previous_page def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, ResourceListOfGetCounterpartyResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
true
true
f72487670d1fd26ffb29aca415344110891be0c5
4,468
py
Python
solidata_api/_models/models_dataset_output.py
entrepreneur-interet-general/solidata_backend
08ba9151069f2f633461f5166b1954fdeac7854a
[ "MIT" ]
7
2018-12-13T09:50:26.000Z
2022-03-25T23:59:26.000Z
solidata_api/_models/models_dataset_output.py
entrepreneur-interet-general/solidata_backend
08ba9151069f2f633461f5166b1954fdeac7854a
[ "MIT" ]
35
2018-10-16T09:06:40.000Z
2019-03-27T11:12:11.000Z
solidata_api/_models/models_dataset_output.py
entrepreneur-interet-general/solidata_backend
08ba9151069f2f633461f5166b1954fdeac7854a
[ "MIT" ]
2
2019-02-16T15:19:05.000Z
2019-02-19T19:27:44.000Z
# -*- encoding: utf-8 -*- """ _models/models_dataset_outputs.py """ from log_config import log, pformat log.debug("... loading models_dataset_outputs.py ...") from flask_restplus import fields ### import data serializers from solidata_api._serializers.schema_logs import * from solidata_api._serializers.schema_generic import * # from solidata_api._serializers.schema_projects import * ### import generic models functions from solidata_api._models.models_generic import * ### create models from serializers # nested models : https://github.com/noirbizarre/flask-restplus/issues/8 # model_user_infos = ns.model( "User model", user_infos) #, mask="{name,surname,email}" ) class NewDso : """ Model to display / marshal dso basic form """ def __init__(self, ns_): self.mod = ns_.model( "Dso_basics", {**doc_basics_licence, **open_level_edit_show} ) @property def model(self): return self.mod class Dso_infos : """ Model to display / marshal dataset output """ def __init__(self, ns_) : model_type = "Dso" ### SELF MODULES self._id = oid_field self.basic_infos = create_model_basic_infos( ns_, model_name=model_type+"_infos", need_licence=True) self.public_auth = create_model_public_auth( ns_, model_name=model_type+"_public_auth") self.specs = create_model_specs( ns_, model_name=model_type+"_specs", ) self.log = create_model_log( ns_, model_name=model_type+"_log", include_is_running=True, include_is_loaded=True ) self.modif_log = create_model_modif_log( ns_, model_name=model_type+"_modif_log") self.uses = create_model_uses( ns_, model_name=model_type+"_uses", schema_list=[ "usr", "prj" ]) self.uses_light = create_model_uses( ns_, model_name=model_type+"_uses", schema_list=[ "prj" ]) self.datasets = create_model_datasets( ns_, model_name=model_type+"_datasets", schema_list=[ "dsi","tag" ]) self.datasets_light = create_model_datasets( ns_, model_name=model_type+"_datasets", schema_list=[ "dsi","tag" ], is_light=True ) self.translations = create_model_translations(ns_, model_name=model_type+"_translations") self.team = create_model_team( ns_, model_name=model_type+"_team") self.team_light = create_model_team( ns_, model_name=model_type+"_team", is_light=True) self.data_raw = create_model_data_raw( ns_, model_name=model_type+"_data_raw", schema="dso" ) self.model_id = { '_id' : self._id, } self.model_in = { 'modif_log' : self.modif_log , "datasets" : self.datasets , } self.model_min = { 'infos' : self.basic_infos, 'public_auth' : self.public_auth, 'specs' : self.specs , 'log' : self.log , 'translations' : self.translations, } self.mod_data_raw ={ 'data_raw' : self.data_raw, } self.model_team_full = { 'team' : self.team , } self.model_team_light = { 'team' : self.team_light, } self.model_uses = { 'uses' : self.uses, } self.model_uses_light = { 'uses' : self.uses_light, } self.model_datasets_light = { 'datasets' : self.datasets_light, } ### IN / complete data to enter in DB self.mod_complete_in = ns_.model(model_type+"_in", { **self.model_min, **self.model_in, **self.model_team_full, **self.model_uses, **self.mod_data_raw, } ) ### OUT COMPLETE / complete data to get out of DB self.mod_complete_out = ns_.model(model_type+"_out", { **self.model_min, **self.model_in, **self.model_id, **self.model_team_full, **self.model_uses, **self.mod_data_raw, } ) ### OUT GUEST / complete data to get out of DB self.mod_guest_out = ns_.model(model_type+"_guest_out", { **self.model_min, **self.model_in, **self.model_id, **self.model_team_light, **self.model_uses_light, **self.mod_data_raw, } ) ### MIN / minimum data to marshall out 
self.mod_minimum = ns_.model(model_type+"_minimum", { **self.model_min, **self.model_id, **self.model_uses_light, **self.model_datasets_light, **self.mod_data_raw, } ) @property def model_complete_in(self): return self.mod_complete_in @property def model_complete_out(self): return self.mod_complete_out @property def model_guest_out(self): return self.mod_guest_out @property def model_minimum(self): return self.mod_minimum
26.282353
133
0.673679
from log_config import log, pformat
log.debug("... loading models_dataset_outputs.py ...")
from flask_restplus import fields
from solidata_api._serializers.schema_logs import *
from solidata_api._serializers.schema_generic import *
from solidata_api._models.models_generic import *
class NewDso :
    def __init__(self, ns_):
        self.mod = ns_.model( "Dso_basics", {**doc_basics_licence, **open_level_edit_show} )
    @property
    def model(self):
        return self.mod
class Dso_infos :
    def __init__(self, ns_) :
        model_type = "Dso"
        self._id = oid_field
        self.basic_infos = create_model_basic_infos( ns_, model_name=model_type+"_infos", need_licence=True)
        self.public_auth = create_model_public_auth( ns_, model_name=model_type+"_public_auth")
        self.specs = create_model_specs( ns_, model_name=model_type+"_specs", )
        self.log = create_model_log( ns_, model_name=model_type+"_log", include_is_running=True, include_is_loaded=True )
        self.modif_log = create_model_modif_log( ns_, model_name=model_type+"_modif_log")
        self.uses = create_model_uses( ns_, model_name=model_type+"_uses", schema_list=[ "usr", "prj" ])
        self.uses_light = create_model_uses( ns_, model_name=model_type+"_uses", schema_list=[ "prj" ])
        self.datasets = create_model_datasets( ns_, model_name=model_type+"_datasets", schema_list=[ "dsi","tag" ])
        self.datasets_light = create_model_datasets( ns_, model_name=model_type+"_datasets", schema_list=[ "dsi","tag" ], is_light=True )
        self.translations = create_model_translations(ns_, model_name=model_type+"_translations")
        self.team = create_model_team( ns_, model_name=model_type+"_team")
        self.team_light = create_model_team( ns_, model_name=model_type+"_team", is_light=True)
        self.data_raw = create_model_data_raw( ns_, model_name=model_type+"_data_raw", schema="dso" )
        self.model_id = { '_id' : self._id, }
        self.model_in = { 'modif_log' : self.modif_log , "datasets" : self.datasets , }
        self.model_min = { 'infos' : self.basic_infos, 'public_auth' : self.public_auth, 'specs' : self.specs , 'log' : self.log , 'translations' : self.translations, }
        self.mod_data_raw ={ 'data_raw' : self.data_raw, }
        self.model_team_full = { 'team' : self.team , }
        self.model_team_light = { 'team' : self.team_light, }
        self.model_uses = { 'uses' : self.uses, }
        self.model_uses_light = { 'uses' : self.uses_light, }
        self.model_datasets_light = { 'datasets' : self.datasets_light, }
        self.mod_complete_in = ns_.model(model_type+"_in", { **self.model_min, **self.model_in, **self.model_team_full, **self.model_uses, **self.mod_data_raw, } )
        self.mod_complete_out = ns_.model(model_type+"_out", { **self.model_min, **self.model_in, **self.model_id, **self.model_team_full, **self.model_uses, **self.mod_data_raw, } )
        self.mod_guest_out = ns_.model(model_type+"_guest_out", { **self.model_min, **self.model_in, **self.model_id, **self.model_team_light, **self.model_uses_light, **self.mod_data_raw, } )
        self.mod_minimum = ns_.model(model_type+"_minimum", { **self.model_min, **self.model_id, **self.model_uses_light, **self.model_datasets_light, **self.mod_data_raw, } )
    @property
    def model_complete_in(self):
        return self.mod_complete_in
    @property
    def model_complete_out(self):
        return self.mod_complete_out
    @property
    def model_guest_out(self):
        return self.mod_guest_out
    @property
    def model_minimum(self):
        return self.mod_minimum
true
true
f72487c1401c258eeaef80d0bad2132c073531cf
8,204
py
Python
openapi_client/api/dc_graph_get_report_depot_utilization_v1_api.py
vertica/vertica-accelerator-cli
706925f58a4bfc2876903396db72363f673be76a
[ "Apache-2.0" ]
null
null
null
openapi_client/api/dc_graph_get_report_depot_utilization_v1_api.py
vertica/vertica-accelerator-cli
706925f58a4bfc2876903396db72363f673be76a
[ "Apache-2.0" ]
null
null
null
openapi_client/api/dc_graph_get_report_depot_utilization_v1_api.py
vertica/vertica-accelerator-cli
706925f58a4bfc2876903396db72363f673be76a
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ VAAS API No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: 0.0.1 Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from openapi_client.api_client import ApiClient from openapi_client.exceptions import ( # noqa: F401 ApiTypeError, ApiValueError ) class DcGraphGetReportDepotUtilizationV1Api(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def v1_vaas_reports_dbname_depot_utilization_get(self, dbname, module, **kwargs): # noqa: E501 """v1_vaas_reports_dbname_depot_utilization_get # noqa: E501 Get a dc report from certain database. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_vaas_reports_dbname_depot_utilization_get(dbname, module, async_req=True) >>> result = thread.get() :param dbname: (required) :type dbname: str :param module: Name of the module. (required) :type module: str :param time_range: Time range for the report. :type time_range: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: InlineResponse200 """ kwargs['_return_http_data_only'] = True return self.v1_vaas_reports_dbname_depot_utilization_get_with_http_info(dbname, module, **kwargs) # noqa: E501 def v1_vaas_reports_dbname_depot_utilization_get_with_http_info(self, dbname, module, **kwargs): # noqa: E501 """v1_vaas_reports_dbname_depot_utilization_get # noqa: E501 Get a dc report from certain database. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_vaas_reports_dbname_depot_utilization_get_with_http_info(dbname, module, async_req=True) >>> result = thread.get() :param dbname: (required) :type dbname: str :param module: Name of the module. (required) :type module: str :param time_range: Time range for the report. :type time_range: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. 
:type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: tuple(InlineResponse200, status_code(int), headers(HTTPHeaderDict)) """ local_var_params = locals() all_params = [ 'dbname', 'module', 'time_range' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_vaas_reports_dbname_depot_utilization_get" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'dbname' is set if self.api_client.client_side_validation and ('dbname' not in local_var_params or # noqa: E501 local_var_params['dbname'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `dbname` when calling `v1_vaas_reports_dbname_depot_utilization_get`") # noqa: E501 # verify the required parameter 'module' is set if self.api_client.client_side_validation and ('module' not in local_var_params or # noqa: E501 local_var_params['module'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `module` when calling `v1_vaas_reports_dbname_depot_utilization_get`") # noqa: E501 collection_formats = {} path_params = {} if 'dbname' in local_var_params: path_params['dbname'] = local_var_params['dbname'] # noqa: E501 query_params = [] if 'module' in local_var_params and local_var_params['module'] is not None: # noqa: E501 query_params.append(('module', local_var_params['module'])) # noqa: E501 if 'time_range' in local_var_params and local_var_params['time_range'] is not None: # noqa: E501 query_params.append(('time_range', local_var_params['time_range'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['*/*', 'application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = { 200: "InlineResponse200", 408: None, 500: None, } return self.api_client.call_api( '/v1/vaas/reports/{dbname}/depot-utilization', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth'))
42.507772
148
0.615553
from __future__ import absolute_import import re import six from openapi_client.api_client import ApiClient from openapi_client.exceptions import ( ApiTypeError, ApiValueError ) class DcGraphGetReportDepotUtilizationV1Api(object): def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def v1_vaas_reports_dbname_depot_utilization_get(self, dbname, module, **kwargs): kwargs['_return_http_data_only'] = True return self.v1_vaas_reports_dbname_depot_utilization_get_with_http_info(dbname, module, **kwargs) def v1_vaas_reports_dbname_depot_utilization_get_with_http_info(self, dbname, module, **kwargs): local_var_params = locals() all_params = [ 'dbname', 'module', 'time_range' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_vaas_reports_dbname_depot_utilization_get" % key ) local_var_params[key] = val del local_var_params['kwargs'] if self.api_client.client_side_validation and ('dbname' not in local_var_params or local_var_params['dbname'] is None): raise ApiValueError("Missing the required parameter `dbname` when calling `v1_vaas_reports_dbname_depot_utilization_get`") if self.api_client.client_side_validation and ('module' not in local_var_params or local_var_params['module'] is None): raise ApiValueError("Missing the required parameter `module` when calling `v1_vaas_reports_dbname_depot_utilization_get`") collection_formats = {} path_params = {} if 'dbname' in local_var_params: path_params['dbname'] = local_var_params['dbname'] query_params = [] if 'module' in local_var_params and local_var_params['module'] is not None: query_params.append(('module', local_var_params['module'])) if 'time_range' in local_var_params and local_var_params['time_range'] is not None: query_params.append(('time_range', local_var_params['time_range'])) header_params = {} form_params = [] local_var_files = {} body_params = None header_params['Accept'] = self.api_client.select_header_accept( ['*/*', 'application/json']) auth_settings = [] response_types_map = { 200: "InlineResponse200", 408: None, 500: None, } return self.api_client.call_api( '/v1/vaas/reports/{dbname}/depot-utilization', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth'))
true
true
f724888e65513eebbb1135160df884f5d67661ab
22,396
py
Python
pytest_django/plugin.py
oboynitro/pytest-django
e4ebc59b0037e5623706c738ef8cbf09ecd2425d
[ "BSD-3-Clause" ]
1
2020-10-23T02:46:08.000Z
2020-10-23T02:46:08.000Z
pytest_django/plugin.py
oboynitro/pytest-django
e4ebc59b0037e5623706c738ef8cbf09ecd2425d
[ "BSD-3-Clause" ]
null
null
null
pytest_django/plugin.py
oboynitro/pytest-django
e4ebc59b0037e5623706c738ef8cbf09ecd2425d
[ "BSD-3-Clause" ]
null
null
null
"""A pytest plugin which helps testing Django applications This plugin handles creating and destroying the test environment and test database and provides some useful text fixtures. """ import contextlib import inspect from functools import reduce import os import pathlib import sys import pytest from .django_compat import is_django_unittest # noqa from .fixtures import django_assert_num_queries # noqa from .fixtures import django_assert_max_num_queries # noqa from .fixtures import django_db_setup # noqa from .fixtures import django_db_use_migrations # noqa from .fixtures import django_db_keepdb # noqa from .fixtures import django_db_createdb # noqa from .fixtures import django_db_modify_db_settings # noqa from .fixtures import django_db_modify_db_settings_parallel_suffix # noqa from .fixtures import django_db_modify_db_settings_tox_suffix # noqa from .fixtures import django_db_modify_db_settings_xdist_suffix # noqa from .fixtures import _live_server_helper # noqa from .fixtures import admin_client # noqa from .fixtures import admin_user # noqa from .fixtures import async_client # noqa from .fixtures import client # noqa from .fixtures import db # noqa from .fixtures import django_user_model # noqa from .fixtures import django_username_field # noqa from .fixtures import live_server # noqa from .fixtures import django_db_reset_sequences # noqa from .fixtures import async_rf # noqa from .fixtures import rf # noqa from .fixtures import settings # noqa from .fixtures import transactional_db # noqa from .lazy_django import django_settings_is_configured, skip_if_no_django SETTINGS_MODULE_ENV = "DJANGO_SETTINGS_MODULE" CONFIGURATION_ENV = "DJANGO_CONFIGURATION" INVALID_TEMPLATE_VARS_ENV = "FAIL_INVALID_TEMPLATE_VARS" _report_header = [] # ############### pytest hooks ################ def pytest_addoption(parser): group = parser.getgroup("django") group.addoption( "--reuse-db", action="store_true", dest="reuse_db", default=False, help="Re-use the testing database if it already exists, " "and do not remove it when the test finishes.", ) group.addoption( "--create-db", action="store_true", dest="create_db", default=False, help="Re-create the database, even if it exists. This " "option can be used to override --reuse-db.", ) group.addoption( "--ds", action="store", type=str, dest="ds", default=None, help="Set DJANGO_SETTINGS_MODULE.", ) group.addoption( "--dc", action="store", type=str, dest="dc", default=None, help="Set DJANGO_CONFIGURATION.", ) group.addoption( "--nomigrations", "--no-migrations", action="store_true", dest="nomigrations", default=False, help="Disable Django migrations on test setup", ) group.addoption( "--migrations", action="store_false", dest="nomigrations", default=False, help="Enable Django migrations on test setup", ) parser.addini( CONFIGURATION_ENV, "django-configurations class to use by pytest-django." ) group.addoption( "--liveserver", default=None, help="Address and port for the live_server fixture.", ) parser.addini( SETTINGS_MODULE_ENV, "Django settings module to use by pytest-django." ) parser.addini( "django_find_project", "Automatically find and add a Django project to the " "Python path.", type="bool", default=True, ) parser.addini( "django_debug_mode", "How to set the Django DEBUG setting (default `False`). 
" "Use `keep` to not override.", default="False", ) group.addoption( "--fail-on-template-vars", action="store_true", dest="itv", default=False, help="Fail for invalid variables in templates.", ) parser.addini( INVALID_TEMPLATE_VARS_ENV, "Fail for invalid variables in templates.", type="bool", default=False, ) PROJECT_FOUND = ( "pytest-django found a Django project in %s " "(it contains manage.py) and added it to the Python path.\n" 'If this is wrong, add "django_find_project = false" to ' "pytest.ini and explicitly manage your Python path." ) PROJECT_NOT_FOUND = ( "pytest-django could not find a Django project " "(no manage.py file could be found). You must " "explicitly add your Django project to the Python path " "to have it picked up." ) PROJECT_SCAN_DISABLED = ( "pytest-django did not search for Django " "projects since it is disabled in the configuration " '("django_find_project = false")' ) @contextlib.contextmanager def _handle_import_error(extra_message): try: yield except ImportError as e: django_msg = (e.args[0] + "\n\n") if e.args else "" msg = django_msg + extra_message raise ImportError(msg) def _add_django_project_to_path(args): def is_django_project(path): try: return path.is_dir() and (path / "manage.py").exists() except OSError: return False def arg_to_path(arg): # Test classes or functions can be appended to paths separated by :: arg = arg.split("::", 1)[0] return pathlib.Path(arg) def find_django_path(args): args = map(str, args) args = [arg_to_path(x) for x in args if not x.startswith("-")] cwd = pathlib.Path.cwd() if not args: args.append(cwd) elif cwd not in args: args.append(cwd) for arg in args: if is_django_project(arg): return arg for parent in arg.parents: if is_django_project(parent): return parent return None project_dir = find_django_path(args) if project_dir: sys.path.insert(0, str(project_dir.absolute())) return PROJECT_FOUND % project_dir return PROJECT_NOT_FOUND def _setup_django(): if "django" not in sys.modules: return import django.conf # Avoid force-loading Django when settings are not properly configured. if not django.conf.settings.configured: return import django.apps if not django.apps.apps.ready: django.setup() _blocking_manager.block() def _get_boolean_value(x, name, default=None): if x is None: return default if x in (True, False): return x possible_values = {"true": True, "false": False, "1": True, "0": False} try: return possible_values[x.lower()] except KeyError: raise ValueError( "{} is not a valid value for {}. " "It must be one of {}.".format(x, name, ", ".join(possible_values.keys())) ) def pytest_load_initial_conftests(early_config, parser, args): # Register the marks early_config.addinivalue_line( "markers", "django_db(transaction=False): Mark the test as using " "the Django test database. The *transaction* argument marks will " "allow you to use real transactions in the test like Django's " "TransactionTestCase.", ) early_config.addinivalue_line( "markers", "urls(modstr): Use a different URLconf for this test, similar to " "the `urls` attribute of Django's `TestCase` objects. *modstr* is " "a string specifying the module of a URL config, e.g. 
" '"my_app.test_urls".', ) early_config.addinivalue_line( "markers", "ignore_template_errors(): ignore errors from invalid template " "variables (if --fail-on-template-vars is used).", ) options = parser.parse_known_args(args) if options.version or options.help: return django_find_project = _get_boolean_value( early_config.getini("django_find_project"), "django_find_project" ) if django_find_project: _django_project_scan_outcome = _add_django_project_to_path(args) else: _django_project_scan_outcome = PROJECT_SCAN_DISABLED if ( options.itv or _get_boolean_value( os.environ.get(INVALID_TEMPLATE_VARS_ENV), INVALID_TEMPLATE_VARS_ENV ) or early_config.getini(INVALID_TEMPLATE_VARS_ENV) ): os.environ[INVALID_TEMPLATE_VARS_ENV] = "true" def _get_option_with_source(option, envname): if option: return option, "option" if envname in os.environ: return os.environ[envname], "env" cfgval = early_config.getini(envname) if cfgval: return cfgval, "ini" return None, None ds, ds_source = _get_option_with_source(options.ds, SETTINGS_MODULE_ENV) dc, dc_source = _get_option_with_source(options.dc, CONFIGURATION_ENV) if ds: _report_header.append("settings: {} (from {})".format(ds, ds_source)) os.environ[SETTINGS_MODULE_ENV] = ds if dc: _report_header.append("configuration: {} (from {})".format(dc, dc_source)) os.environ[CONFIGURATION_ENV] = dc # Install the django-configurations importer import configurations.importer configurations.importer.install() # Forcefully load Django settings, throws ImportError or # ImproperlyConfigured if settings cannot be loaded. from django.conf import settings as dj_settings with _handle_import_error(_django_project_scan_outcome): dj_settings.DATABASES _setup_django() def pytest_report_header(): if _report_header: return ["django: " + ", ".join(_report_header)] @pytest.hookimpl(trylast=True) def pytest_configure(): # Allow Django settings to be configured in a user pytest_configure call, # but make sure we call django.setup() _setup_django() @pytest.hookimpl(tryfirst=True) def pytest_collection_modifyitems(items): # If Django is not configured we don't need to bother if not django_settings_is_configured(): return from django.test import TestCase, TransactionTestCase def get_order_number(test): if hasattr(test, "cls") and test.cls: # Beware, TestCase is a subclass of TransactionTestCase if issubclass(test.cls, TestCase): return 0 if issubclass(test.cls, TransactionTestCase): return 1 marker_db = test.get_closest_marker('django_db') if marker_db: transaction = validate_django_db(marker_db)[0] if transaction is True: return 1 else: transaction = None fixtures = getattr(test, 'fixturenames', []) if "transactional_db" in fixtures: return 1 if transaction is False: return 0 if "db" in fixtures: return 0 return 2 items[:] = sorted(items, key=get_order_number) @pytest.fixture(autouse=True, scope="session") def django_test_environment(request): """ Ensure that Django is loaded and has its testing environment setup. XXX It is a little dodgy that this is an autouse fixture. Perhaps an email fixture should be requested in order to be able to use the Django email machinery just like you need to request a db fixture for access to the Django database, etc. But without duplicating a lot more of Django's test support code we need to follow this model. 
""" if django_settings_is_configured(): _setup_django() from django.test.utils import setup_test_environment, teardown_test_environment debug_ini = request.config.getini("django_debug_mode") if debug_ini == "keep": debug = None else: debug = _get_boolean_value(debug_ini, False) setup_test_environment(debug=debug) request.addfinalizer(teardown_test_environment) @pytest.fixture(scope="session") def django_db_blocker(): """Wrapper around Django's database access. This object can be used to re-enable database access. This fixture is used internally in pytest-django to build the other fixtures and can be used for special database handling. The object is a context manager and provides the methods .unblock()/.block() and .restore() to temporarily enable database access. This is an advanced feature that is meant to be used to implement database fixtures. """ if not django_settings_is_configured(): return None return _blocking_manager @pytest.fixture(autouse=True) def _django_db_marker(request): """Implement the django_db marker, internal to pytest-django. This will dynamically request the ``db``, ``transactional_db`` or ``django_db_reset_sequences`` fixtures as required by the django_db marker. """ marker = request.node.get_closest_marker("django_db") if marker: transaction, reset_sequences = validate_django_db(marker) if reset_sequences: request.getfixturevalue("django_db_reset_sequences") elif transaction: request.getfixturevalue("transactional_db") else: request.getfixturevalue("db") @pytest.fixture(autouse=True, scope="class") def _django_setup_unittest(request, django_db_blocker): """Setup a django unittest, internal to pytest-django.""" if not django_settings_is_configured() or not is_django_unittest(request): yield return # Fix/patch pytest. # Before pytest 5.4: https://github.com/pytest-dev/pytest/issues/5991 # After pytest 5.4: https://github.com/pytest-dev/pytest-django/issues/824 from _pytest.unittest import TestCaseFunction original_runtest = TestCaseFunction.runtest def non_debugging_runtest(self): self._testcase(result=self) try: TestCaseFunction.runtest = non_debugging_runtest request.getfixturevalue("django_db_setup") with django_db_blocker.unblock(): yield finally: TestCaseFunction.runtest = original_runtest @pytest.fixture(scope="function", autouse=True) def _dj_autoclear_mailbox(): if not django_settings_is_configured(): return from django.core import mail del mail.outbox[:] @pytest.fixture(scope="function") def mailoutbox(django_mail_patch_dns, _dj_autoclear_mailbox): if not django_settings_is_configured(): return from django.core import mail return mail.outbox @pytest.fixture(scope="function") def django_mail_patch_dns(monkeypatch, django_mail_dnsname): from django.core import mail monkeypatch.setattr(mail.message, "DNS_NAME", django_mail_dnsname) @pytest.fixture(scope="function") def django_mail_dnsname(): return "fake-tests.example.com" @pytest.fixture(autouse=True, scope="function") def _django_set_urlconf(request): """Apply the @pytest.mark.urls marker, internal to pytest-django.""" marker = request.node.get_closest_marker("urls") if marker: skip_if_no_django() import django.conf from django.urls import clear_url_caches, set_urlconf urls = validate_urls(marker) original_urlconf = django.conf.settings.ROOT_URLCONF django.conf.settings.ROOT_URLCONF = urls clear_url_caches() set_urlconf(None) def restore(): django.conf.settings.ROOT_URLCONF = original_urlconf # Copy the pattern from # https://github.com/django/django/blob/master/django/test/signals.py#L152 clear_url_caches() 
set_urlconf(None) request.addfinalizer(restore) @pytest.fixture(autouse=True, scope="session") def _fail_for_invalid_template_variable(): """Fixture that fails for invalid variables in templates. This fixture will fail each test that uses django template rendering should a template contain an invalid template variable. The fail message will include the name of the invalid variable and in most cases the template name. It does not raise an exception, but fails, as the stack trace doesn't offer any helpful information to debug. This behavior can be switched off using the marker: ``pytest.mark.ignore_template_errors`` """ class InvalidVarException: """Custom handler for invalid strings in templates.""" def __init__(self): self.fail = True def __contains__(self, key): return key == "%s" @staticmethod def _get_origin(): stack = inspect.stack() # Try to use topmost `self.origin` first (Django 1.9+, and with # TEMPLATE_DEBUG).. for f in stack[2:]: func = f[3] if func == "render": frame = f[0] try: origin = frame.f_locals["self"].origin except (AttributeError, KeyError): continue if origin is not None: return origin from django.template import Template # finding the ``render`` needle in the stack frame = reduce( lambda x, y: y[3] == "render" and "base.py" in y[1] and y or x, stack ) # assert 0, stack frame = frame[0] # finding only the frame locals in all frame members f_locals = reduce( lambda x, y: y[0] == "f_locals" and y or x, inspect.getmembers(frame) )[1] # ``django.template.base.Template`` template = f_locals["self"] if isinstance(template, Template): return template.name def __mod__(self, var): origin = self._get_origin() if origin: msg = "Undefined template variable '{}' in '{}'".format(var, origin) else: msg = "Undefined template variable '%s'" % var if self.fail: pytest.fail(msg) else: return msg if ( os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true" and django_settings_is_configured() ): from django.conf import settings as dj_settings if dj_settings.TEMPLATES: dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"] = InvalidVarException() @pytest.fixture(autouse=True) def _template_string_if_invalid_marker(request): """Apply the @pytest.mark.ignore_template_errors marker, internal to pytest-django.""" marker = request.keywords.get("ignore_template_errors", None) if os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true": if marker and django_settings_is_configured(): from django.conf import settings as dj_settings if dj_settings.TEMPLATES: dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"].fail = False @pytest.fixture(autouse=True, scope="function") def _django_clear_site_cache(): """Clears ``django.contrib.sites.models.SITE_CACHE`` to avoid unexpected behavior with cached site objects. """ if django_settings_is_configured(): from django.conf import settings as dj_settings if "django.contrib.sites" in dj_settings.INSTALLED_APPS: from django.contrib.sites.models import Site Site.objects.clear_cache() # ############### Helper Functions ################ class _DatabaseBlockerContextManager: def __init__(self, db_blocker): self._db_blocker = db_blocker def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): self._db_blocker.restore() class _DatabaseBlocker: """Manager for django.db.backends.base.base.BaseDatabaseWrapper. This is the object returned by django_db_blocker. 
""" def __init__(self): self._history = [] self._real_ensure_connection = None @property def _dj_db_wrapper(self): from django.db.backends.base.base import BaseDatabaseWrapper # The first time the _dj_db_wrapper is accessed, we will save a # reference to the real implementation. if self._real_ensure_connection is None: self._real_ensure_connection = BaseDatabaseWrapper.ensure_connection return BaseDatabaseWrapper def _save_active_wrapper(self): return self._history.append(self._dj_db_wrapper.ensure_connection) def _blocking_wrapper(*args, **kwargs): __tracebackhide__ = True __tracebackhide__ # Silence pyflakes raise RuntimeError( "Database access not allowed, " 'use the "django_db" mark, or the ' '"db" or "transactional_db" fixtures to enable it.' ) def unblock(self): """Enable access to the Django database.""" self._save_active_wrapper() self._dj_db_wrapper.ensure_connection = self._real_ensure_connection return _DatabaseBlockerContextManager(self) def block(self): """Disable access to the Django database.""" self._save_active_wrapper() self._dj_db_wrapper.ensure_connection = self._blocking_wrapper return _DatabaseBlockerContextManager(self) def restore(self): self._dj_db_wrapper.ensure_connection = self._history.pop() _blocking_manager = _DatabaseBlocker() def validate_django_db(marker): """Validate the django_db marker. It checks the signature and creates the ``transaction`` and ``reset_sequences`` attributes on the marker which will have the correct values. A sequence reset is only allowed when combined with a transaction. """ def apifun(transaction=False, reset_sequences=False): return transaction, reset_sequences return apifun(*marker.args, **marker.kwargs) def validate_urls(marker): """Validate the urls marker. It checks the signature and creates the `urls` attribute on the marker which will have the correct value. """ def apifun(urls): return urls return apifun(*marker.args, **marker.kwargs)
30.976487
92
0.655162
import contextlib
import inspect
from functools import reduce
import os
import pathlib
import sys

import pytest

from .django_compat import is_django_unittest
from .fixtures import django_assert_num_queries
from .fixtures import django_assert_max_num_queries
from .fixtures import django_db_setup
from .fixtures import django_db_use_migrations
from .fixtures import django_db_keepdb
from .fixtures import django_db_createdb
from .fixtures import django_db_modify_db_settings
from .fixtures import django_db_modify_db_settings_parallel_suffix
from .fixtures import django_db_modify_db_settings_tox_suffix
from .fixtures import django_db_modify_db_settings_xdist_suffix
from .fixtures import _live_server_helper
from .fixtures import admin_client
from .fixtures import admin_user
from .fixtures import async_client
from .fixtures import client
from .fixtures import db
from .fixtures import django_user_model
from .fixtures import django_username_field
from .fixtures import live_server
from .fixtures import django_db_reset_sequences
from .fixtures import async_rf
from .fixtures import rf
from .fixtures import settings
from .fixtures import transactional_db
from .lazy_django import django_settings_is_configured, skip_if_no_django

SETTINGS_MODULE_ENV = "DJANGO_SETTINGS_MODULE"
CONFIGURATION_ENV = "DJANGO_CONFIGURATION"
INVALID_TEMPLATE_VARS_ENV = "FAIL_INVALID_TEMPLATE_VARS"

_report_header = []

def pytest_addoption(parser):
    group = parser.getgroup("django")
    group.addoption(
        "--reuse-db",
        action="store_true",
        dest="reuse_db",
        default=False,
        help="Re-use the testing database if it already exists, "
        "and do not remove it when the test finishes.",
    )
    group.addoption(
        "--create-db",
        action="store_true",
        dest="create_db",
        default=False,
        help="Re-create the database, even if it exists. This "
        "option can be used to override --reuse-db.",
    )
    group.addoption(
        "--ds",
        action="store",
        type=str,
        dest="ds",
        default=None,
        help="Set DJANGO_SETTINGS_MODULE.",
    )
    group.addoption(
        "--dc",
        action="store",
        type=str,
        dest="dc",
        default=None,
        help="Set DJANGO_CONFIGURATION.",
    )
    group.addoption(
        "--nomigrations",
        "--no-migrations",
        action="store_true",
        dest="nomigrations",
        default=False,
        help="Disable Django migrations on test setup",
    )
    group.addoption(
        "--migrations",
        action="store_false",
        dest="nomigrations",
        default=False,
        help="Enable Django migrations on test setup",
    )
    parser.addini(
        CONFIGURATION_ENV, "django-configurations class to use by pytest-django."
    )
    group.addoption(
        "--liveserver",
        default=None,
        help="Address and port for the live_server fixture.",
    )
    parser.addini(
        SETTINGS_MODULE_ENV, "Django settings module to use by pytest-django."
    )
    parser.addini(
        "django_find_project",
        "Automatically find and add a Django project to the " "Python path.",
        type="bool",
        default=True,
    )
    parser.addini(
        "django_debug_mode",
        "How to set the Django DEBUG setting (default `False`). "
        "Use `keep` to not override.",
        default="False",
    )
    group.addoption(
        "--fail-on-template-vars",
        action="store_true",
        dest="itv",
        default=False,
        help="Fail for invalid variables in templates.",
    )
    parser.addini(
        INVALID_TEMPLATE_VARS_ENV,
        "Fail for invalid variables in templates.",
        type="bool",
        default=False,
    )

PROJECT_FOUND = (
    "pytest-django found a Django project in %s "
    "(it contains manage.py) and added it to the Python path.\n"
    'If this is wrong, add "django_find_project = false" to '
    "pytest.ini and explicitly manage your Python path."
)

PROJECT_NOT_FOUND = (
    "pytest-django could not find a Django project "
    "(no manage.py file could be found). You must "
    "explicitly add your Django project to the Python path "
    "to have it picked up."
) PROJECT_SCAN_DISABLED = ( "pytest-django did not search for Django " "projects since it is disabled in the configuration " '("django_find_project = false")' ) @contextlib.contextmanager def _handle_import_error(extra_message): try: yield except ImportError as e: django_msg = (e.args[0] + "\n\n") if e.args else "" msg = django_msg + extra_message raise ImportError(msg) def _add_django_project_to_path(args): def is_django_project(path): try: return path.is_dir() and (path / "manage.py").exists() except OSError: return False def arg_to_path(arg): arg = arg.split("::", 1)[0] return pathlib.Path(arg) def find_django_path(args): args = map(str, args) args = [arg_to_path(x) for x in args if not x.startswith("-")] cwd = pathlib.Path.cwd() if not args: args.append(cwd) elif cwd not in args: args.append(cwd) for arg in args: if is_django_project(arg): return arg for parent in arg.parents: if is_django_project(parent): return parent return None project_dir = find_django_path(args) if project_dir: sys.path.insert(0, str(project_dir.absolute())) return PROJECT_FOUND % project_dir return PROJECT_NOT_FOUND def _setup_django(): if "django" not in sys.modules: return import django.conf if not django.conf.settings.configured: return import django.apps if not django.apps.apps.ready: django.setup() _blocking_manager.block() def _get_boolean_value(x, name, default=None): if x is None: return default if x in (True, False): return x possible_values = {"true": True, "false": False, "1": True, "0": False} try: return possible_values[x.lower()] except KeyError: raise ValueError( "{} is not a valid value for {}. " "It must be one of {}.".format(x, name, ", ".join(possible_values.keys())) ) def pytest_load_initial_conftests(early_config, parser, args): early_config.addinivalue_line( "markers", "django_db(transaction=False): Mark the test as using " "the Django test database. The *transaction* argument marks will " "allow you to use real transactions in the test like Django's " "TransactionTestCase.", ) early_config.addinivalue_line( "markers", "urls(modstr): Use a different URLconf for this test, similar to " "the `urls` attribute of Django's `TestCase` objects. *modstr* is " "a string specifying the module of a URL config, e.g. 
" '"my_app.test_urls".', ) early_config.addinivalue_line( "markers", "ignore_template_errors(): ignore errors from invalid template " "variables (if --fail-on-template-vars is used).", ) options = parser.parse_known_args(args) if options.version or options.help: return django_find_project = _get_boolean_value( early_config.getini("django_find_project"), "django_find_project" ) if django_find_project: _django_project_scan_outcome = _add_django_project_to_path(args) else: _django_project_scan_outcome = PROJECT_SCAN_DISABLED if ( options.itv or _get_boolean_value( os.environ.get(INVALID_TEMPLATE_VARS_ENV), INVALID_TEMPLATE_VARS_ENV ) or early_config.getini(INVALID_TEMPLATE_VARS_ENV) ): os.environ[INVALID_TEMPLATE_VARS_ENV] = "true" def _get_option_with_source(option, envname): if option: return option, "option" if envname in os.environ: return os.environ[envname], "env" cfgval = early_config.getini(envname) if cfgval: return cfgval, "ini" return None, None ds, ds_source = _get_option_with_source(options.ds, SETTINGS_MODULE_ENV) dc, dc_source = _get_option_with_source(options.dc, CONFIGURATION_ENV) if ds: _report_header.append("settings: {} (from {})".format(ds, ds_source)) os.environ[SETTINGS_MODULE_ENV] = ds if dc: _report_header.append("configuration: {} (from {})".format(dc, dc_source)) os.environ[CONFIGURATION_ENV] = dc import configurations.importer configurations.importer.install() from django.conf import settings as dj_settings with _handle_import_error(_django_project_scan_outcome): dj_settings.DATABASES _setup_django() def pytest_report_header(): if _report_header: return ["django: " + ", ".join(_report_header)] @pytest.hookimpl(trylast=True) def pytest_configure(): _setup_django() @pytest.hookimpl(tryfirst=True) def pytest_collection_modifyitems(items): if not django_settings_is_configured(): return from django.test import TestCase, TransactionTestCase def get_order_number(test): if hasattr(test, "cls") and test.cls: # Beware, TestCase is a subclass of TransactionTestCase if issubclass(test.cls, TestCase): return 0 if issubclass(test.cls, TransactionTestCase): return 1 marker_db = test.get_closest_marker('django_db') if marker_db: transaction = validate_django_db(marker_db)[0] if transaction is True: return 1 else: transaction = None fixtures = getattr(test, 'fixturenames', []) if "transactional_db" in fixtures: return 1 if transaction is False: return 0 if "db" in fixtures: return 0 return 2 items[:] = sorted(items, key=get_order_number) @pytest.fixture(autouse=True, scope="session") def django_test_environment(request): if django_settings_is_configured(): _setup_django() from django.test.utils import setup_test_environment, teardown_test_environment debug_ini = request.config.getini("django_debug_mode") if debug_ini == "keep": debug = None else: debug = _get_boolean_value(debug_ini, False) setup_test_environment(debug=debug) request.addfinalizer(teardown_test_environment) @pytest.fixture(scope="session") def django_db_blocker(): if not django_settings_is_configured(): return None return _blocking_manager @pytest.fixture(autouse=True) def _django_db_marker(request): marker = request.node.get_closest_marker("django_db") if marker: transaction, reset_sequences = validate_django_db(marker) if reset_sequences: request.getfixturevalue("django_db_reset_sequences") elif transaction: request.getfixturevalue("transactional_db") else: request.getfixturevalue("db") @pytest.fixture(autouse=True, scope="class") def _django_setup_unittest(request, django_db_blocker): if not 
django_settings_is_configured() or not is_django_unittest(request): yield return # Fix/patch pytest. # Before pytest 5.4: https://github.com/pytest-dev/pytest/issues/5991 # After pytest 5.4: https://github.com/pytest-dev/pytest-django/issues/824 from _pytest.unittest import TestCaseFunction original_runtest = TestCaseFunction.runtest def non_debugging_runtest(self): self._testcase(result=self) try: TestCaseFunction.runtest = non_debugging_runtest request.getfixturevalue("django_db_setup") with django_db_blocker.unblock(): yield finally: TestCaseFunction.runtest = original_runtest @pytest.fixture(scope="function", autouse=True) def _dj_autoclear_mailbox(): if not django_settings_is_configured(): return from django.core import mail del mail.outbox[:] @pytest.fixture(scope="function") def mailoutbox(django_mail_patch_dns, _dj_autoclear_mailbox): if not django_settings_is_configured(): return from django.core import mail return mail.outbox @pytest.fixture(scope="function") def django_mail_patch_dns(monkeypatch, django_mail_dnsname): from django.core import mail monkeypatch.setattr(mail.message, "DNS_NAME", django_mail_dnsname) @pytest.fixture(scope="function") def django_mail_dnsname(): return "fake-tests.example.com" @pytest.fixture(autouse=True, scope="function") def _django_set_urlconf(request): marker = request.node.get_closest_marker("urls") if marker: skip_if_no_django() import django.conf from django.urls import clear_url_caches, set_urlconf urls = validate_urls(marker) original_urlconf = django.conf.settings.ROOT_URLCONF django.conf.settings.ROOT_URLCONF = urls clear_url_caches() set_urlconf(None) def restore(): django.conf.settings.ROOT_URLCONF = original_urlconf # Copy the pattern from # https://github.com/django/django/blob/master/django/test/signals.py#L152 clear_url_caches() set_urlconf(None) request.addfinalizer(restore) @pytest.fixture(autouse=True, scope="session") def _fail_for_invalid_template_variable(): class InvalidVarException: def __init__(self): self.fail = True def __contains__(self, key): return key == "%s" @staticmethod def _get_origin(): stack = inspect.stack() # Try to use topmost `self.origin` first (Django 1.9+, and with # TEMPLATE_DEBUG).. 
for f in stack[2:]: func = f[3] if func == "render": frame = f[0] try: origin = frame.f_locals["self"].origin except (AttributeError, KeyError): continue if origin is not None: return origin from django.template import Template # finding the ``render`` needle in the stack frame = reduce( lambda x, y: y[3] == "render" and "base.py" in y[1] and y or x, stack ) # assert 0, stack frame = frame[0] # finding only the frame locals in all frame members f_locals = reduce( lambda x, y: y[0] == "f_locals" and y or x, inspect.getmembers(frame) )[1] # ``django.template.base.Template`` template = f_locals["self"] if isinstance(template, Template): return template.name def __mod__(self, var): origin = self._get_origin() if origin: msg = "Undefined template variable '{}' in '{}'".format(var, origin) else: msg = "Undefined template variable '%s'" % var if self.fail: pytest.fail(msg) else: return msg if ( os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true" and django_settings_is_configured() ): from django.conf import settings as dj_settings if dj_settings.TEMPLATES: dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"] = InvalidVarException() @pytest.fixture(autouse=True) def _template_string_if_invalid_marker(request): marker = request.keywords.get("ignore_template_errors", None) if os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true": if marker and django_settings_is_configured(): from django.conf import settings as dj_settings if dj_settings.TEMPLATES: dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"].fail = False @pytest.fixture(autouse=True, scope="function") def _django_clear_site_cache(): if django_settings_is_configured(): from django.conf import settings as dj_settings if "django.contrib.sites" in dj_settings.INSTALLED_APPS: from django.contrib.sites.models import Site Site.objects.clear_cache() # ############### Helper Functions ################ class _DatabaseBlockerContextManager: def __init__(self, db_blocker): self._db_blocker = db_blocker def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): self._db_blocker.restore() class _DatabaseBlocker: def __init__(self): self._history = [] self._real_ensure_connection = None @property def _dj_db_wrapper(self): from django.db.backends.base.base import BaseDatabaseWrapper # The first time the _dj_db_wrapper is accessed, we will save a # reference to the real implementation. if self._real_ensure_connection is None: self._real_ensure_connection = BaseDatabaseWrapper.ensure_connection return BaseDatabaseWrapper def _save_active_wrapper(self): return self._history.append(self._dj_db_wrapper.ensure_connection) def _blocking_wrapper(*args, **kwargs): __tracebackhide__ = True __tracebackhide__ # Silence pyflakes raise RuntimeError( "Database access not allowed, " 'use the "django_db" mark, or the ' '"db" or "transactional_db" fixtures to enable it.' 
) def unblock(self): self._save_active_wrapper() self._dj_db_wrapper.ensure_connection = self._real_ensure_connection return _DatabaseBlockerContextManager(self) def block(self): self._save_active_wrapper() self._dj_db_wrapper.ensure_connection = self._blocking_wrapper return _DatabaseBlockerContextManager(self) def restore(self): self._dj_db_wrapper.ensure_connection = self._history.pop() _blocking_manager = _DatabaseBlocker() def validate_django_db(marker): def apifun(transaction=False, reset_sequences=False): return transaction, reset_sequences return apifun(*marker.args, **marker.kwargs) def validate_urls(marker): def apifun(urls): return urls return apifun(*marker.args, **marker.kwargs)
true
true
f72489a44f3b9b2634bf77eab598bc59f36daa24
26,662
py
Python
src/squad/graphs.py
douglasdaly/spot-robot
7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2
[ "MIT" ]
null
null
null
src/squad/graphs.py
douglasdaly/spot-robot
7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2
[ "MIT" ]
null
null
null
src/squad/graphs.py
douglasdaly/spot-robot
7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2
[ "MIT" ]
null
null
null
from typing import Any, Dict, List, Optional, Tuple, Type, Union, overload import numpy as np from squad.exceptions import ( EdgeAlreadyExists, EdgeNotFound, NodeAlreadyExists, NodeNotFound, ) class Node: """ Single node in a graph. """ def __init__(self, name: str, **data: Any) -> None: self._name = name self._data = data @property def name(self) -> str: """str: The name of this node.""" return self._name @property def data(self) -> Dict[str, Any]: """Dict[str, Any]: The data stored in this node (if any).""" return self._data.copy() def __str__(self) -> str: return f"{self.__class__.__name__}({self._name})" def __hash__(self) -> int: return hash((self.__class__.__name__, self._name)) def __eq__(self, __o: object) -> bool: if isinstance(__o, Node): return self._name == __o._name elif isinstance(__o, str): return self._name == __o raise ValueError( f"Cannot compare {self.__class__.__name__} with" f" {type(__o).__name__}" ) def __getitem__(self, key: str) -> Any: return self._data[key] def update(self, **data: Any) -> None: """Updates the data stored on this node. Parameters ---------- **data : Any, optional The data parameters to update on this node. """ self._data.update(data) class Edge: """ Single edge in a graph. """ def __init__( self, u: Node, v: Node, weight: float = 1.0, **data: Any, ) -> None: self._u = u self._v = v self._wgt = weight self._value: Optional[float] = None self._data = data @property def u(self) -> Node: """Node: The first node in this edge.""" return self._u @property def v(self) -> Node: """Node: The second node in this edge.""" return self._v @property def weight(self) -> float: """float: The weight of this edge.""" return self._wgt @weight.setter def weight(self, value: float) -> None: self._wgt = value @property def value(self) -> float: """float: The value of this edge.""" if self._value is None: self._value = self.get_value() return self._value @value.setter def value(self, value: float) -> None: self._value = value @property def weighted_value(self) -> float: """float: The weighted-value of this edge.""" return self._wgt * self.value @property def data(self) -> Dict[str, Any]: """Dict[str, Any]: The data associated with this edge (if any).""" return self._data.copy() def __str__(self) -> str: return f"{self.__class__.__name__}({self._u.name}, {self._v.name})" def __hash__(self) -> int: return hash((self.__class__.__name__, self._u, self._v)) def __eq__(self, __o: object) -> bool: if isinstance(__o, Edge): return self._u == __o._u and self._v == __o._v elif isinstance(__o, tuple): return self._u._name == __o[0] and self._v._name == __o[1] raise ValueError( f"Cannot compare {self.__class__.__name__} with" f" {type(__o).__name__}" ) def __getitem__(self, key: str) -> Any: return self._data[key] def __call__(self, **kwargs: Any) -> float: self.update(**kwargs) return self._wgt * self.value def update(self, **data: Any) -> None: """Updates this edge's state. Parameters ---------- **data : Any Any named-parameters to update the edge's data with. """ if data: self._data.update(data) self._value = self.get_value() def get_value(self) -> float: """Gets the value associated with this edge. Returns ------- float The computed value for this edge. """ return 1.0 def remove_square_matrix_index(matrix: np.ndarray, index: int) -> np.ndarray: """Removes the row & column of the specified index from the given square matrix. Parameters ---------- matrix : np.ndarray The square matrix to remove the specified `index` row and column from. 
index : int The index of the row & column to remove from the given `matrix`. Returns ------- np.ndarray The new matrix, from the original `matrix` given, with the desired row & column `index` removed. Raises ------ ValueError If the given `matrix` is not a square matrix. IndexError If the given `index` is invalid for the bounds of the given `matrix`. """ if matrix.ndim < 2 or matrix.shape[0] != matrix.shape[1]: raise ValueError(f"Invalid matrix given, shape: {matrix.shape}") elif abs(index) > (matrix.shape[0] - 1): raise IndexError(index) return np.delete(np.delete(matrix, index, axis=0), index, axis=1) class Graph: """ Directed graph. """ def __init__( self, node_cls: Optional[Type[Node]] = None, edge_cls: Optional[Type[Edge]] = None, ) -> None: self._node_cls = node_cls or Node self._nodes: List[Node] = [] self._node_lookup: Dict[str, int] = {} self._edge_cls = edge_cls or Edge self._edges: List[Edge] = [] self._edge_lookup: Dict[Tuple[str, str], int] = {} self._adj_mat = np.array([], dtype=float) self._con_mat = self._adj_mat.copy() @property def nodes(self) -> Dict[str, Node]: """Dict[str, Node]: The nodes contained in this graph.""" return {x.name: x for x in self._nodes} @property def edges(self) -> Dict[str, Dict[str, Edge]]: """Dict[str, Dict[str, Edge]]: The edges in this graph.""" ret = {x.name: {} for x in self._nodes} for x in self._edges: ret[x.u.name][x.v.name] = x return ret def __getitem__( self, key: Union[str, Tuple[str, str]], ) -> Union[Edge, Node]: if isinstance(key, str): if key not in self._node_lookup: raise NodeNotFound(key) return self._nodes[self._node_lookup[key]] else: if key not in self._edge_lookup: raise EdgeNotFound(*key) return self._edges[self._edge_lookup[key]] def add(self, obj: Union[Edge, Node]) -> None: """Adds an edge or node to this graph. Parameters ---------- obj : Union[Edge, Node] The node or edge object to add to this graph. Raises ------ EdgeAlreadyExists If the given edge `obj` is already in this graph. NodeAlreadyExists If the given node `obj` is already in this graph. NodeNotFound If one or both of the nodes in the given edge `obj` is not in this graph. """ if isinstance(obj, Edge): if obj in self._edges: raise EdgeAlreadyExists(obj.u.name, obj.v.name) elif obj.u.name not in self._node_lookup: raise NodeNotFound(obj.u.name) elif obj.v.name not in self._node_lookup: raise NodeNotFound(obj.v.name) self._add_edge_obj(obj) else: if obj in self._nodes: raise NodeAlreadyExists(obj.name) self._add_node_obj(obj) return def remove(self, obj: Union[Edge, Node]) -> None: """Removes the given edge or node from this graph. Parameters ---------- obj : Union[Edge, Node] The edge or node object to remove from this graph. Raises ------ EdgeNotFound If the given edge `obj` could not be found. NodeNotFound If the given node `obj` could not be found. 
""" if isinstance(obj, Edge): if obj not in self._edges: raise EdgeNotFound(obj.u.name, obj.v.name) self._remove_edge_obj(obj.u.name, obj.v.name) else: if obj not in self._nodes: raise NodeNotFound(obj.name) self._remove_node_obj(obj.name) return def clear(self) -> None: """Clears all nodes and edges from this graph.""" self._node_lookup.clear() self._nodes.clear() self._edge_lookup.clear() self._edges.clear() self._adj_mat = np.array([], dtype=self._adj_mat.dtype) self._con_mat = self._adj_mat.copy() def _add_edge_obj(self, edge: Edge) -> None: """Adds a new edge object to this graph.""" self._edges.append(edge) new_n_edges = len(self._edges) self._edge_lookup[(edge.u.name, edge.v.name)] = new_n_edges - 1 idx_u = self._nodes.index(edge.u) idx_v = self._nodes.index(edge.v) self._adj_mat[idx_u, idx_v] = 1.0 self._con_mat[idx_u, idx_v] = 1.0 if idx_u != idx_v: self._con_mat[idx_v, idx_u] = 1.0 return def _remove_edge_obj(self, u_name: str, v_name: str) -> None: """Removes the specified edge from this graph.""" # - Update adjacency/connection matrices u_idx = self._node_lookup[u_name] v_idx = self._node_lookup[v_name] self._adj_mat[u_idx, v_idx] = 0.0 if u_idx == v_idx: self._con_mat[u_idx, v_idx] = 0.0 elif (v_name, u_name) not in self._edge_lookup: self._con_mat[u_idx, v_idx] = 0.0 self._con_mat[v_idx, u_idx] = 0.0 # - Remove edge edge_idx = self._edge_lookup.pop((u_name, v_name)) self._edges.pop(edge_idx) # - Update lookup table for relevant edges edge_names_to_update = [ (x.u.name, x.v.name) for x in self._edges[edge_idx:] ] for edge_name in edge_names_to_update: self._edge_lookup[edge_name] -= 1 return def _add_node_obj(self, node: Node) -> None: """Adds a new node object to this graph.""" orig_n_nodes = len(self._nodes) self._nodes.append(node) self._node_lookup[node.name] = orig_n_nodes new_n_nodes = orig_n_nodes + 1 upd_adj_mat = np.zeros( (new_n_nodes, new_n_nodes), dtype=self._adj_mat.dtype, ) upd_con_mat = upd_adj_mat.copy() if orig_n_nodes: upd_adj_mat[:orig_n_nodes, :orig_n_nodes] = self._adj_mat upd_con_mat[:orig_n_nodes, :orig_n_nodes] = self._con_mat self._adj_mat = upd_adj_mat self._con_mat = upd_con_mat def _remove_node_obj(self, node_name: str) -> None: """Removes an existing node object from this graph.""" node_idx = self._node_lookup[node_name] # Update the adjacency/connection matrices self._adj_mat = remove_square_matrix_index(self._adj_mat, node_idx) self._con_mat = remove_square_matrix_index(self._con_mat, node_idx) # - Remove any edge objects connected to the node def _edge_filter(x: Tuple[str, str]) -> bool: return node_name in x edge_idxs_to_remove = sorted( ( self._edge_lookup[k] for k in filter(_edge_filter, self._edge_lookup.keys()) ), reverse=True, ) edge_names_to_remove = [ (x.u.name, x.v.name) for x in (self._edges[i] for i in edge_idxs_to_remove) ] for i, n in zip(edge_idxs_to_remove, edge_names_to_remove): del self._edge_lookup[n] self._edges.pop(i) # - Remove the node object self._nodes.pop(node_idx) # - Update the lookup tables for node in self._nodes[node_idx:]: self._node_lookup[node.name] -= 1 for i, edge in enumerate(self._edges): self._edge_lookup[(edge.u.name, edge.v.name)] = i return def add_node(self, name: str, **data: Any) -> None: """Creates and adds a new node to this graph. Parameters ---------- name : str The name of the node to add to this graph. **data : Any The data of the node to add to this graph (if any). Raises ------ NodeAlreadyExists If a node with the same `name` given already exists in this graph. 
""" if name in (x.name for x in self._nodes): raise NodeAlreadyExists(name) new_node = self._node_cls(name, **data) self._add_node_obj(new_node) def add_nodes(self, *names: str, **data: Any) -> None: """Creates and adds new node(s) to this graph. Parameters ---------- *names : str The name(s) of the new nodes to create and add. **data : Any, optional The data (if any) to associate with each of the new nodes. Raises ------ NodeAlreadyExists If any of the nodes from the given `names` already exist in this graph. ValueError If no `names` are provided. """ for name in names: if name in self._node_lookup: raise NodeAlreadyExists(name) for name in names: new_node = self._node_cls(name, **data) self._add_node_obj(new_node) return def remove_node(self, name: str) -> None: """Removes the specified node from this graph. Parameters ---------- name : str The name of the node to remove. Raises ------ NodeNotFound If the node with the given `name` could not be found. """ if name not in self._node_lookup: raise NodeNotFound(name) self._remove_node_obj(name) def add_edge( self, u_name: str, v_name: str, weight: float = 1.0, **data: Any, ) -> None: """Creates and adds a new edge to this graph. Parameters ---------- u_name : str The name of the (existing) node to set as the first node for the new edge to add. v_name : str The name of the (existing) node to set as the second node for the new edge to add. weight : float, default=1.0 The weight to use for the new edge to add. **data : Any, optional The data (if any) to store on the new edge. Raises ------ EdgeAlreadyExists If an edge for the given nodes specified already exists in this graph. NodeNotFound If either of the given nodes specified could not be found. """ if (u_name, v_name) in ((x.u.name, x.v.name) for x in self._edges): raise EdgeAlreadyExists(u_name, v_name) u = None v = None for node in self._nodes: if node.name == u_name: u = node if node.name == v_name: v = node if u is not None and v is not None: break if u is None: raise NodeNotFound(u_name) if v is None: raise NodeNotFound(v_name) new_edge = self._edge_cls(u, v, weight=weight, **data) self._add_edge_obj(new_edge) def add_edges( self, u_name: str, *v_names: str, weight: float = 1.0, **data: Any, ) -> None: """Adds multiple edges from `u_name` to this graph. Parameters ---------- u_name : str The name of the (existing) node to set as the first node for the new edges to add. *v_names : str The names of the (existing) nodes to set as the second node for the new edge to add. weight : float, default=1.0 The weight to use for each new edge to add. **data : Any, optional The data (if any) to store on each new edge. Raises ------ EdgeAlreadyExists If any edge for the given nodes specified already exists in this graph. NodeNotFound If any of the given nodes specified could not be found. ValueError If no `v_names` are provided. """ if not v_names: raise ValueError("You must provide at least one v node name") if u_name not in self._node_lookup: raise NodeNotFound(u_name) else: for v in v_names: if v not in self._node_lookup: raise NodeNotFound(v) for e in ((u_name, v) for v in v_names): if e in self._edge_lookup: raise EdgeAlreadyExists(e[0], e[1]) u_node = self._nodes[self._node_lookup[u_name]] for v_name in v_names: v_node = self._nodes[self._node_lookup[v_name]] new_edge = self._edge_cls(u_node, v_node, weight=weight, **data) self._add_edge_obj(new_edge) return def remove_edge(self, u_name: str, v_name: str) -> None: """Removes the edge specified from this graph. 
Parameters ---------- u_name : str The name of the first node in the edge to remove. v_name : str The name of the second node in the edge to remove. Raises ------ EdgeNotFound If the specified edge could not be found. NodeNotFound If either node specified by the given `u_name` and `v_name` could not be found. """ if u_name not in self._node_lookup: raise NodeNotFound(u_name) elif v_name not in self._node_lookup: raise NodeNotFound(v_name) elif (u_name, v_name) not in self._edge_lookup: raise EdgeNotFound(u_name, v_name) self._remove_edge_obj(u_name, v_name) def update_nodes(self, *names: str, **data: Any) -> None: """Updates the node(s) in this graph. Parameters ---------- *names : str, optional The specific node(s) to update (if not given then all nodes will be updated). **data : Any, optional The data updates to push to all nodes in the graph for the update calls. """ if names: nodes = (x for x in self._nodes if x.name in names) else: nodes = self._nodes for node in nodes: node.update(**data) return def update_edges(self, *names: str, **data: Any) -> None: """Updates all the edges in this graph. Parameters ---------- *names : str, optional The u-node (first node) names of the relevant edges to update (if not provided then all edges are updated). **data : Any, optional Any data updates to push to all edges in the graph for the update calls. """ if names: edges = (x for x in self._edges if x.u.name in names) else: edges = self._edges for edge in edges: edge.update(**data) return @overload def adj_edges(self, u_name: str) -> List[Edge]: ... @overload def adj_edges(self, u_name: str, v_name: str) -> Edge: ... def adj_edges( self, u_name: str, v_name: Optional[str] = None, ) -> Union[Edge, List[Edge]]: """Gets the adjacenct edge(s) specified. Parameters ---------- u_name : str The name of the node to get the adjacent edge(s) *from*. v_name : str, optional The name of the node to get the adjacent edge(s) *to* (if any). If not specified (default) it will return all possible adjacent edges. Returns ------- Edge or List[Edge] The adjacent edge(s) from the specified `u_name` (if `v_name` was not specified). If `v_name` was given then it just returns the adjacent edge from the specified `u_name` node to the specified `v_name` node. Raises ------ NodeNotFound If the specified `u_name` node (or `v_name` node, if given) could not be found. EdgeNotFound If the specified `u_name` to `v_name` (if given) edge could not be found. See Also -------- adj, adj_values, adj_weights """ u_idx = None v_idx = None for i, node in enumerate(self._nodes): if node.name == u_name: u_idx = i if v_name is not None: if node.name == v_name: v_idx = i if u_idx is not None and v_idx is not None: break elif u_idx is not None: break if u_idx is None: raise NodeNotFound(u_name) if v_name is not None and v_idx is None: raise NodeNotFound(v_name) if v_name is None: # - All adjacent edges adj_edges: List[Edge] = [] for i, v in enumerate(self._adj_mat[u_idx]): if v == 0.0: continue v_node = self._nodes[i] t_edge = self._edges[self._edge_lookup[(u_name, v_node.name)]] adj_edges.append(t_edge) return adj_edges else: # - Single edge try: adj_edge = self._edges[self._edge_lookup[(u_name, v_name)]] except KeyError: raise EdgeNotFound(u_name, v_name) return adj_edge @overload def adj_values( self, u_name: str, ) -> Dict[str, float]: ... @overload def adj_values( self, u_name: str, v_name: str, ) -> float: ... 
def adj_values( self, u_name: str, v_name: Optional[str] = None, ) -> Union[float, Dict[str, float]]: """Gets the adjacency edge value(s) for the specified node/edge. Parameters ---------- u_name : str The name of the node to get the adjacency data *from*. v_name : str, optional The name of the node to get the adjacency data *to* (if any). If not specified (default) it will return all possible adjacent nodes and values. Returns ------- float or Dict[str, float] The adjacent edges and values from the specified `u_name` (if `v_name` was not specified). If `v_name` was given then it just returns the value of the adjacency edge from the specified `u_name` node to the specified `v_name` node. See Also -------- adj, adj_edges, adj_weights """ # - Single edge value if v_name is not None: edge = self.adj_edges(u_name, v_name) return edge.value # - All adjacent edge values edges = self.adj_edges(u_name) ret = {x.v.name: x.value for x in edges} return ret @overload def adj( self, u_name: str, ) -> Dict[str, float]: ... @overload def adj( self, u_name: str, v_name: str, ) -> float: ... def adj( self, u_name: str, v_name: Optional[str] = None, ) -> Union[float, Dict[str, float]]: """Gets the adjacency edge weighted-value(s) for the specified node/edge. Parameters ---------- u_name : str The name of the node to get the adjacency data *from*. v_name : str, optional The name of the node to get the adjacency data *to* (if any). If not specified (default) it will return all possible adjacent nodes and values. Returns ------- float or Dict[str, float] The adjacent edges and weighted-values from the specified `u_name` (if `v_name` was not specified). If `v_name` was given then it just returns the weighted-value of the adjacent edge from the specified `u_name` node to the specified `v_name` node. See Also -------- adj_edges, adj_values, adj_weights """ # - Single edge value if v_name is not None: edge = self.adj_edges(u_name, v_name) return edge.weighted_value # - All adjacent edge values edges = self.adj_edges(u_name) ret = {x.v.name: x.weighted_value for x in edges} return ret @overload def adj_weights( self, u_name: str, ) -> Dict[str, float]: ... @overload def adj_weights( self, u_name: str, v_name: str, ) -> float: ... def adj_weights( self, u_name: str, v_name: Optional[str] = None, ) -> Union[float, Dict[str, float]]: """Gets the adjacency edge weight(s) of the specified node/edge. Parameters ---------- u_name : str The name of the node to get the adjacency data *from*. v_name : str, optional The name of the node to get the adjacency data *to* (if any). If not specified (default) it will return all possible adjacent nodes and values. Returns ------- float or Dict[str, float] The adjacent edges and weight(s) from the specified `u_name` node (if `v_name` was not specified). If `v_name` was given then it just returns the raw value of the adjacent edge from the specified `u_name` node to the specified `v_name` node. See Also -------- adj, adj_edges, adj_values """ # - Single edge value if v_name is not None: edge = self.adj_edges(u_name, v_name) return edge.weight # - All adjacent edge values edges = self.adj_edges(u_name) ret = {x.v.name: x.weight for x in edges} return ret
29.39581
78
0.543808
from typing import Any, Dict, List, Optional, Tuple, Type, Union, overload import numpy as np from squad.exceptions import ( EdgeAlreadyExists, EdgeNotFound, NodeAlreadyExists, NodeNotFound, ) class Node: def __init__(self, name: str, **data: Any) -> None: self._name = name self._data = data @property def name(self) -> str: return self._name @property def data(self) -> Dict[str, Any]: return self._data.copy() def __str__(self) -> str: return f"{self.__class__.__name__}({self._name})" def __hash__(self) -> int: return hash((self.__class__.__name__, self._name)) def __eq__(self, __o: object) -> bool: if isinstance(__o, Node): return self._name == __o._name elif isinstance(__o, str): return self._name == __o raise ValueError( f"Cannot compare {self.__class__.__name__} with" f" {type(__o).__name__}" ) def __getitem__(self, key: str) -> Any: return self._data[key] def update(self, **data: Any) -> None: self._data.update(data) class Edge: def __init__( self, u: Node, v: Node, weight: float = 1.0, **data: Any, ) -> None: self._u = u self._v = v self._wgt = weight self._value: Optional[float] = None self._data = data @property def u(self) -> Node: return self._u @property def v(self) -> Node: return self._v @property def weight(self) -> float: return self._wgt @weight.setter def weight(self, value: float) -> None: self._wgt = value @property def value(self) -> float: if self._value is None: self._value = self.get_value() return self._value @value.setter def value(self, value: float) -> None: self._value = value @property def weighted_value(self) -> float: return self._wgt * self.value @property def data(self) -> Dict[str, Any]: return self._data.copy() def __str__(self) -> str: return f"{self.__class__.__name__}({self._u.name}, {self._v.name})" def __hash__(self) -> int: return hash((self.__class__.__name__, self._u, self._v)) def __eq__(self, __o: object) -> bool: if isinstance(__o, Edge): return self._u == __o._u and self._v == __o._v elif isinstance(__o, tuple): return self._u._name == __o[0] and self._v._name == __o[1] raise ValueError( f"Cannot compare {self.__class__.__name__} with" f" {type(__o).__name__}" ) def __getitem__(self, key: str) -> Any: return self._data[key] def __call__(self, **kwargs: Any) -> float: self.update(**kwargs) return self._wgt * self.value def update(self, **data: Any) -> None: if data: self._data.update(data) self._value = self.get_value() def get_value(self) -> float: return 1.0 def remove_square_matrix_index(matrix: np.ndarray, index: int) -> np.ndarray: if matrix.ndim < 2 or matrix.shape[0] != matrix.shape[1]: raise ValueError(f"Invalid matrix given, shape: {matrix.shape}") elif abs(index) > (matrix.shape[0] - 1): raise IndexError(index) return np.delete(np.delete(matrix, index, axis=0), index, axis=1) class Graph: def __init__( self, node_cls: Optional[Type[Node]] = None, edge_cls: Optional[Type[Edge]] = None, ) -> None: self._node_cls = node_cls or Node self._nodes: List[Node] = [] self._node_lookup: Dict[str, int] = {} self._edge_cls = edge_cls or Edge self._edges: List[Edge] = [] self._edge_lookup: Dict[Tuple[str, str], int] = {} self._adj_mat = np.array([], dtype=float) self._con_mat = self._adj_mat.copy() @property def nodes(self) -> Dict[str, Node]: return {x.name: x for x in self._nodes} @property def edges(self) -> Dict[str, Dict[str, Edge]]: ret = {x.name: {} for x in self._nodes} for x in self._edges: ret[x.u.name][x.v.name] = x return ret def __getitem__( self, key: Union[str, Tuple[str, str]], ) -> Union[Edge, Node]: if isinstance(key, str): if key 
not in self._node_lookup: raise NodeNotFound(key) return self._nodes[self._node_lookup[key]] else: if key not in self._edge_lookup: raise EdgeNotFound(*key) return self._edges[self._edge_lookup[key]] def add(self, obj: Union[Edge, Node]) -> None: if isinstance(obj, Edge): if obj in self._edges: raise EdgeAlreadyExists(obj.u.name, obj.v.name) elif obj.u.name not in self._node_lookup: raise NodeNotFound(obj.u.name) elif obj.v.name not in self._node_lookup: raise NodeNotFound(obj.v.name) self._add_edge_obj(obj) else: if obj in self._nodes: raise NodeAlreadyExists(obj.name) self._add_node_obj(obj) return def remove(self, obj: Union[Edge, Node]) -> None: if isinstance(obj, Edge): if obj not in self._edges: raise EdgeNotFound(obj.u.name, obj.v.name) self._remove_edge_obj(obj.u.name, obj.v.name) else: if obj not in self._nodes: raise NodeNotFound(obj.name) self._remove_node_obj(obj.name) return def clear(self) -> None: self._node_lookup.clear() self._nodes.clear() self._edge_lookup.clear() self._edges.clear() self._adj_mat = np.array([], dtype=self._adj_mat.dtype) self._con_mat = self._adj_mat.copy() def _add_edge_obj(self, edge: Edge) -> None: self._edges.append(edge) new_n_edges = len(self._edges) self._edge_lookup[(edge.u.name, edge.v.name)] = new_n_edges - 1 idx_u = self._nodes.index(edge.u) idx_v = self._nodes.index(edge.v) self._adj_mat[idx_u, idx_v] = 1.0 self._con_mat[idx_u, idx_v] = 1.0 if idx_u != idx_v: self._con_mat[idx_v, idx_u] = 1.0 return def _remove_edge_obj(self, u_name: str, v_name: str) -> None: u_idx = self._node_lookup[u_name] v_idx = self._node_lookup[v_name] self._adj_mat[u_idx, v_idx] = 0.0 if u_idx == v_idx: self._con_mat[u_idx, v_idx] = 0.0 elif (v_name, u_name) not in self._edge_lookup: self._con_mat[u_idx, v_idx] = 0.0 self._con_mat[v_idx, u_idx] = 0.0 edge_idx = self._edge_lookup.pop((u_name, v_name)) self._edges.pop(edge_idx) edge_names_to_update = [ (x.u.name, x.v.name) for x in self._edges[edge_idx:] ] for edge_name in edge_names_to_update: self._edge_lookup[edge_name] -= 1 return def _add_node_obj(self, node: Node) -> None: orig_n_nodes = len(self._nodes) self._nodes.append(node) self._node_lookup[node.name] = orig_n_nodes new_n_nodes = orig_n_nodes + 1 upd_adj_mat = np.zeros( (new_n_nodes, new_n_nodes), dtype=self._adj_mat.dtype, ) upd_con_mat = upd_adj_mat.copy() if orig_n_nodes: upd_adj_mat[:orig_n_nodes, :orig_n_nodes] = self._adj_mat upd_con_mat[:orig_n_nodes, :orig_n_nodes] = self._con_mat self._adj_mat = upd_adj_mat self._con_mat = upd_con_mat def _remove_node_obj(self, node_name: str) -> None: node_idx = self._node_lookup[node_name] self._adj_mat = remove_square_matrix_index(self._adj_mat, node_idx) self._con_mat = remove_square_matrix_index(self._con_mat, node_idx) def _edge_filter(x: Tuple[str, str]) -> bool: return node_name in x edge_idxs_to_remove = sorted( ( self._edge_lookup[k] for k in filter(_edge_filter, self._edge_lookup.keys()) ), reverse=True, ) edge_names_to_remove = [ (x.u.name, x.v.name) for x in (self._edges[i] for i in edge_idxs_to_remove) ] for i, n in zip(edge_idxs_to_remove, edge_names_to_remove): del self._edge_lookup[n] self._edges.pop(i) self._nodes.pop(node_idx) for node in self._nodes[node_idx:]: self._node_lookup[node.name] -= 1 for i, edge in enumerate(self._edges): self._edge_lookup[(edge.u.name, edge.v.name)] = i return def add_node(self, name: str, **data: Any) -> None: if name in (x.name for x in self._nodes): raise NodeAlreadyExists(name) new_node = self._node_cls(name, **data) self._add_node_obj(new_node) def 
add_nodes(self, *names: str, **data: Any) -> None: for name in names: if name in self._node_lookup: raise NodeAlreadyExists(name) for name in names: new_node = self._node_cls(name, **data) self._add_node_obj(new_node) return def remove_node(self, name: str) -> None: if name not in self._node_lookup: raise NodeNotFound(name) self._remove_node_obj(name) def add_edge( self, u_name: str, v_name: str, weight: float = 1.0, **data: Any, ) -> None: if (u_name, v_name) in ((x.u.name, x.v.name) for x in self._edges): raise EdgeAlreadyExists(u_name, v_name) u = None v = None for node in self._nodes: if node.name == u_name: u = node if node.name == v_name: v = node if u is not None and v is not None: break if u is None: raise NodeNotFound(u_name) if v is None: raise NodeNotFound(v_name) new_edge = self._edge_cls(u, v, weight=weight, **data) self._add_edge_obj(new_edge) def add_edges( self, u_name: str, *v_names: str, weight: float = 1.0, **data: Any, ) -> None: if not v_names: raise ValueError("You must provide at least one v node name") if u_name not in self._node_lookup: raise NodeNotFound(u_name) else: for v in v_names: if v not in self._node_lookup: raise NodeNotFound(v) for e in ((u_name, v) for v in v_names): if e in self._edge_lookup: raise EdgeAlreadyExists(e[0], e[1]) u_node = self._nodes[self._node_lookup[u_name]] for v_name in v_names: v_node = self._nodes[self._node_lookup[v_name]] new_edge = self._edge_cls(u_node, v_node, weight=weight, **data) self._add_edge_obj(new_edge) return def remove_edge(self, u_name: str, v_name: str) -> None: if u_name not in self._node_lookup: raise NodeNotFound(u_name) elif v_name not in self._node_lookup: raise NodeNotFound(v_name) elif (u_name, v_name) not in self._edge_lookup: raise EdgeNotFound(u_name, v_name) self._remove_edge_obj(u_name, v_name) def update_nodes(self, *names: str, **data: Any) -> None: if names: nodes = (x for x in self._nodes if x.name in names) else: nodes = self._nodes for node in nodes: node.update(**data) return def update_edges(self, *names: str, **data: Any) -> None: if names: edges = (x for x in self._edges if x.u.name in names) else: edges = self._edges for edge in edges: edge.update(**data) return @overload def adj_edges(self, u_name: str) -> List[Edge]: ... @overload def adj_edges(self, u_name: str, v_name: str) -> Edge: ... def adj_edges( self, u_name: str, v_name: Optional[str] = None, ) -> Union[Edge, List[Edge]]: u_idx = None v_idx = None for i, node in enumerate(self._nodes): if node.name == u_name: u_idx = i if v_name is not None: if node.name == v_name: v_idx = i if u_idx is not None and v_idx is not None: break elif u_idx is not None: break if u_idx is None: raise NodeNotFound(u_name) if v_name is not None and v_idx is None: raise NodeNotFound(v_name) if v_name is None: adj_edges: List[Edge] = [] for i, v in enumerate(self._adj_mat[u_idx]): if v == 0.0: continue v_node = self._nodes[i] t_edge = self._edges[self._edge_lookup[(u_name, v_node.name)]] adj_edges.append(t_edge) return adj_edges else: try: adj_edge = self._edges[self._edge_lookup[(u_name, v_name)]] except KeyError: raise EdgeNotFound(u_name, v_name) return adj_edge @overload def adj_values( self, u_name: str, ) -> Dict[str, float]: ... @overload def adj_values( self, u_name: str, v_name: str, ) -> float: ... 
def adj_values( self, u_name: str, v_name: Optional[str] = None, ) -> Union[float, Dict[str, float]]: if v_name is not None: edge = self.adj_edges(u_name, v_name) return edge.value edges = self.adj_edges(u_name) ret = {x.v.name: x.value for x in edges} return ret @overload def adj( self, u_name: str, ) -> Dict[str, float]: ... @overload def adj( self, u_name: str, v_name: str, ) -> float: ... def adj( self, u_name: str, v_name: Optional[str] = None, ) -> Union[float, Dict[str, float]]: if v_name is not None: edge = self.adj_edges(u_name, v_name) return edge.weighted_value edges = self.adj_edges(u_name) ret = {x.v.name: x.weighted_value for x in edges} return ret @overload def adj_weights( self, u_name: str, ) -> Dict[str, float]: ... @overload def adj_weights( self, u_name: str, v_name: str, ) -> float: ... def adj_weights( self, u_name: str, v_name: Optional[str] = None, ) -> Union[float, Dict[str, float]]: if v_name is not None: edge = self.adj_edges(u_name, v_name) return edge.weight edges = self.adj_edges(u_name) ret = {x.v.name: x.weight for x in edges} return ret
true
true
f72489d8e00e85d8c00ed35e505fae2c30fe7577
1,139
py
Python
Advanced-Algorithms-and-Complexity/Week3 - P vs NP/03 - Budget Allocation/budget_allocation.py
ChristineHu1207/Coursera-Data-Structures-and-Algorithms-Specialization
27f543ca0778d00ffd624ffcd18bf555660e0168
[ "MIT" ]
null
null
null
Advanced-Algorithms-and-Complexity/Week3 - P vs NP/03 - Budget Allocation/budget_allocation.py
ChristineHu1207/Coursera-Data-Structures-and-Algorithms-Specialization
27f543ca0778d00ffd624ffcd18bf555660e0168
[ "MIT" ]
null
null
null
Advanced-Algorithms-and-Complexity/Week3 - P vs NP/03 - Budget Allocation/budget_allocation.py
ChristineHu1207/Coursera-Data-Structures-and-Algorithms-Specialization
27f543ca0778d00ffd624ffcd18bf555660e0168
[ "MIT" ]
null
null
null
# python3 import itertools n, m = list(map(int, input().split())) A = [] for i in range(n): A += [list(map(int, input().split()))] b = list(map(int, input().split())) clauses = [] for i, coefficient in enumerate(A): non_coefficients = [(j, coefficient[j]) for j in range(m) if 0 != coefficient[j]] l = len(non_coefficients) for x in range(2 ** l): current_set = [non_coefficients[j] for j in range(l) if 1 == ((x / 2 ** j) % 2) // 1] current_sum = 0 for coeff in current_set: current_sum += coeff[1] if current_sum > b[i]: clauses.append([-(coeff[0]+1) for coeff in current_set] + [(coeff[0]+1) for coeff in non_coefficients if coeff not in current_set]) if len(clauses) == 0: clauses.append([1, -1]) m = 1 print(len(clauses), m) for c in clauses: c.append(0) print(' '.join(map(str, c))) # This solution prints a simple satisfiable formula # and passes about half of the tests. # Change this function to solve the problem. # def printEquisatisfiableSatFormula(): # print("3 2") # print("1 2 0") # print("-1 -2 0") # print("1 -2 0") # printEquisatisfiableSatFormula()
24.76087
137
0.627744
import itertools n, m = list(map(int, input().split())) A = [] for i in range(n): A += [list(map(int, input().split()))] b = list(map(int, input().split())) clauses = [] for i, coefficient in enumerate(A): non_coefficients = [(j, coefficient[j]) for j in range(m) if 0 != coefficient[j]] l = len(non_coefficients) for x in range(2 ** l): current_set = [non_coefficients[j] for j in range(l) if 1 == ((x / 2 ** j) % 2) // 1] current_sum = 0 for coeff in current_set: current_sum += coeff[1] if current_sum > b[i]: clauses.append([-(coeff[0]+1) for coeff in current_set] + [(coeff[0]+1) for coeff in non_coefficients if coeff not in current_set]) if len(clauses) == 0: clauses.append([1, -1]) m = 1 print(len(clauses), m) for c in clauses: c.append(0) print(' '.join(map(str, c)))
true
true
f72489ec9d755295d9c7b8adc3c280594304173c
7,911
py
Python
bokeh/_version.py
timelyportfolio/bokeh
a976a85535cf137c6238ce9e90b41ab14ae8ce22
[ "BSD-3-Clause" ]
2
2015-07-23T21:19:52.000Z
2016-01-25T17:00:15.000Z
bokeh/_version.py
brian15co/bokeh
6cecb7211277b9d838039d0eb15e50a10f9ac3d1
[ "BSD-3-Clause" ]
null
null
null
bokeh/_version.py
brian15co/bokeh
6cecb7211277b9d838039d0eb15e50a10f9ac3d1
[ "BSD-3-Clause" ]
2
2015-12-22T04:13:10.000Z
2021-07-06T21:18:04.000Z
IN_LONG_VERSION_PY = True # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by github's download-from-tag # feature). Distribution tarballs (build by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.7+ (https://github.com/warner/python-versioneer) # these strings will be replaced by git during git-archive git_refnames = "$Format:%d$" git_full = "$Format:%H$" GIT = "git" import subprocess import sys def run_command(args, cwd=None, verbose=False): try: # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd) except EnvironmentError: e = sys.exc_info()[1] if verbose: print("unable to run %s" % args[0]) print(e) return None stdout = p.communicate()[0].strip() if sys.version >= '3': stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % args[0]) return None return stdout import sys import re import os.path def get_expanded_variables(versionfile_source): # the code embedded in _version.py can just fetch the value of these # variables. When used from setup.py, we don't want to import # _version.py, so we do it with a regexp instead. This function is not # used from _version.py. variables = {} try: for line in open(versionfile_source,"r").readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: variables["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: variables["full"] = mo.group(1) except EnvironmentError: pass return variables def versions_from_expanded_variables(variables, tag_prefix, verbose=False): refnames = variables["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("variables are unexpanded, not using") return {} # unexpanded, so not in an unpacked git-archive tarball refs = set([r.strip() for r in refnames.strip("()").split(",")]) for ref in list(refs): if not re.search(r'\d', ref): if verbose: print("discarding '%s', no digits" % ref) refs.discard(ref) # Assume all version tags have a digit. git's %d expansion # behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us # distinguish between branches and tags. By ignoring refnames # without digits, we filter out many common branch names like # "release" and "stabilization", as well as "HEAD" and "master". if verbose: print("remaining refs: %s" % ",".join(sorted(refs))) for ref in sorted(refs): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return { "version": r, "full": variables["full"].strip() } # no suitable tags, so we use the full revision id if verbose: print("no suitable tags, using full revision id") return { "version": variables["full"].strip(), "full": variables["full"].strip() } def versions_from_vcs(tag_prefix, versionfile_source, verbose=False): # this runs 'git' from the root of the source tree. 
That either means # someone ran a setup.py command (and this code is in versioneer.py, so # IN_LONG_VERSION_PY=False, thus the containing directory is the root of # the source tree), or someone ran a project-specific entry point (and # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the # containing directory is somewhere deeper in the source tree). This only # gets called if the git-archive 'subst' variables were *not* expanded, # and _version.py hasn't already been rewritten with a short version # string, meaning we're inside a checked out source tree. try: here = os.path.realpath(__file__) except NameError: # some py2exe/bbfreeze/non-CPython implementations don't do __file__ return {} # not always correct # versionfile_source is the relative path from the top of the source tree # (where the .git directory might live) to this file. Invert this to find # the root from __file__. root = here if IN_LONG_VERSION_PY: for i in range(len(versionfile_source.split("/"))): root = os.path.dirname(root) else: root = os.path.dirname(here) if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %s" % root) return {} stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"], cwd=root) if stdout is None: return {} if not stdout.startswith(tag_prefix): if verbose: print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix)) return {} tag = stdout[len(tag_prefix):] stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root) if stdout is None: return {} full = stdout.strip() if tag.endswith("-dirty"): full += "-dirty" return {"version": tag, "full": full} def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False): if IN_LONG_VERSION_PY: # We're running from _version.py. If it's from a source tree # (execute-in-place), we can work upwards to find the root of the # tree, and then check the parent directory for a version string. If # it's in an installed application, there's no hope. try: here = os.path.realpath(__file__) except NameError: # py2exe/bbfreeze/non-CPython don't have __file__ return {} # without __file__, we have no hope # versionfile_source is the relative path from the top of the source # tree to _version.py. Invert this to find the root from __file__. root = here for i in range(len(versionfile_source.split("/"))): root = os.path.dirname(root) else: # we're running from versioneer.py, which means we're running from # the setup.py in a source tree. sys.argv[0] is setup.py in the root. here = os.path.realpath(sys.argv[0]) root = os.path.dirname(here) # Source tarballs conventionally unpack into a directory that includes # both the project name and a version string. dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" % (root, dirname, parentdir_prefix)) return None return {"version": dirname[len(parentdir_prefix):], "full": ""} tag_prefix = "" parentdir_prefix = "Bokeh-" versionfile_source = "bokeh/_version.py" def get_versions(default={"version": "unknown", "full": ""}, verbose=False): variables = { "refnames": git_refnames, "full": git_full } ver = versions_from_expanded_variables(variables, tag_prefix, verbose) if not ver: ver = versions_from_vcs(tag_prefix, versionfile_source, verbose) if not ver: ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose) if not ver: ver = default return ver
40.362245
87
0.627102
IN_LONG_VERSION_PY = True # feature). Distribution tarballs (build by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.7+ (https://github.com/warner/python-versioneer) # these strings will be replaced by git during git-archive git_refnames = "$Format:%d$" git_full = "$Format:%H$" GIT = "git" import subprocess import sys def run_command(args, cwd=None, verbose=False): try: # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd) except EnvironmentError: e = sys.exc_info()[1] if verbose: print("unable to run %s" % args[0]) print(e) return None stdout = p.communicate()[0].strip() if sys.version >= '3': stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % args[0]) return None return stdout import sys import re import os.path def get_expanded_variables(versionfile_source): # the code embedded in _version.py can just fetch the value of these # variables. When used from setup.py, we don't want to import variables = {} try: for line in open(versionfile_source,"r").readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: variables["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: variables["full"] = mo.group(1) except EnvironmentError: pass return variables def versions_from_expanded_variables(variables, tag_prefix, verbose=False): refnames = variables["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("variables are unexpanded, not using") return {} refs = set([r.strip() for r in refnames.strip("()").split(",")]) for ref in list(refs): if not re.search(r'\d', ref): if verbose: print("discarding '%s', no digits" % ref) refs.discard(ref) # behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us # distinguish between branches and tags. By ignoring refnames # without digits, we filter out many common branch names like # "release" and "stabilization", as well as "HEAD" and "master". if verbose: print("remaining refs: %s" % ",".join(sorted(refs))) for ref in sorted(refs): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return { "version": r, "full": variables["full"].strip() } # no suitable tags, so we use the full revision id if verbose: print("no suitable tags, using full revision id") return { "version": variables["full"].strip(), "full": variables["full"].strip() } def versions_from_vcs(tag_prefix, versionfile_source, verbose=False): # this runs 'git' from the root of the source tree. That either means # someone ran a setup.py command (and this code is in versioneer.py, so # IN_LONG_VERSION_PY=False, thus the containing directory is the root of # the source tree), or someone ran a project-specific entry point (and # this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the # containing directory is somewhere deeper in the source tree). 
This only # gets called if the git-archive 'subst' variables were *not* expanded, # and _version.py hasn't already been rewritten with a short version try: here = os.path.realpath(__file__) except NameError: # some py2exe/bbfreeze/non-CPython implementations don't do __file__ return {} root = here if IN_LONG_VERSION_PY: for i in range(len(versionfile_source.split("/"))): root = os.path.dirname(root) else: root = os.path.dirname(here) if not os.path.exists(os.path.join(root, ".git")): if verbose: print("no .git in %s" % root) return {} stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"], cwd=root) if stdout is None: return {} if not stdout.startswith(tag_prefix): if verbose: print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix)) return {} tag = stdout[len(tag_prefix):] stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root) if stdout is None: return {} full = stdout.strip() if tag.endswith("-dirty"): full += "-dirty" return {"version": tag, "full": full} def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False): if IN_LONG_VERSION_PY: # We're running from _version.py. If it's from a source tree # (execute-in-place), we can work upwards to find the root of the # tree, and then check the parent directory for a version string. If # it's in an installed application, there's no hope. try: here = os.path.realpath(__file__) except NameError: # py2exe/bbfreeze/non-CPython don't have __file__ return {} root = here for i in range(len(versionfile_source.split("/"))): root = os.path.dirname(root) else: here = os.path.realpath(sys.argv[0]) root = os.path.dirname(here) dirname = os.path.basename(root) if not dirname.startswith(parentdir_prefix): if verbose: print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" % (root, dirname, parentdir_prefix)) return None return {"version": dirname[len(parentdir_prefix):], "full": ""} tag_prefix = "" parentdir_prefix = "Bokeh-" versionfile_source = "bokeh/_version.py" def get_versions(default={"version": "unknown", "full": ""}, verbose=False): variables = { "refnames": git_refnames, "full": git_full } ver = versions_from_expanded_variables(variables, tag_prefix, verbose) if not ver: ver = versions_from_vcs(tag_prefix, versionfile_source, verbose) if not ver: ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose) if not ver: ver = default return ver
true
true
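In the versioneer record above, versions_from_expanded_variables turns the git-archive $Format:%d$ expansion into a version string: refnames without any digit are discarded (which filters out "HEAD", "master" and common branch names), and the first remaining ref that starts with tag_prefix is used. Below is a minimal sketch of that filtering step on its own; the sample refnames string and the "tag: " prefix are illustrative assumptions, not values taken from the record.

import re

def pick_version(refnames, tag_prefix=""):
    # refnames is what git-archive substitutes for $Format:%d$,
    # e.g. " (HEAD -> master, tag: 0.4.2)"
    refs = {r.strip() for r in refnames.strip().strip("()").split(",")}
    refs = {r for r in refs if re.search(r"\d", r)}   # drop refs with no digits
    for ref in sorted(refs):                          # sorting prefers e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            return ref[len(tag_prefix):]
    return None                                       # caller falls back to the full revision id

print(pick_version(" (HEAD -> master, tag: 0.4.2)", tag_prefix="tag: "))  # -> 0.4.2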
f7248af32f72c111effbd60171246b9815ed3cb7
368
py
Python
sol_runner.py
Square789/AoC
041aecb9e1a06b5417bdef0eb0ab70a542be04b5
[ "MIT" ]
3
2020-12-05T17:43:51.000Z
2020-12-06T10:37:29.000Z
sol_runner.py
Square789/AoC
041aecb9e1a06b5417bdef0eb0ab70a542be04b5
[ "MIT" ]
null
null
null
sol_runner.py
Square789/AoC
041aecb9e1a06b5417bdef0eb0ab70a542be04b5
[ "MIT" ]
null
null
null
import importlib
import sys

from aoc_input import get_input

if __name__ == "__main__":
	if len(sys.argv) < 3:
		print("Specify which file to run! [year, day]")
		sys.exit()

	try:
		year = int(sys.argv[1])
		day = int(sys.argv[2])
	except ValueError:
		print("Integer required!")
		sys.exit()

	module = importlib.import_module(f"y{year}.d{day:>02}")
	module.main()
18.4
56
0.673913
import importlib
import sys

from aoc_input import get_input

if __name__ == "__main__":
	if len(sys.argv) < 3:
		print("Specify which file to run! [year, day]")
		sys.exit()

	try:
		year = int(sys.argv[1])
		day = int(sys.argv[2])
	except ValueError:
		print("Integer required!")
		sys.exit()

	module = importlib.import_module(f"y{year}.d{day:>02}")
	module.main()
true
true
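The sol_runner record above resolves the solution module dynamically from the year and day passed on the command line. As a small usage sketch (the y<year>/d<day>.py package layout is inferred from the f-string in the record, not verified against the repository):

import importlib

def load_solution(year, day):
    # mirrors the runner: zero-pad the day so day 5 of 2020 maps to module "y2020.d05"
    return importlib.import_module(f"y{year}.d{day:02d}")

# module = load_solution(2020, 5)  # would import y2020/d05.py
# module.main()                    # each day module is expected to expose main()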
f7248b04de6e0f200dd961244469207c2c19aa5a
39,165
py
Python
validator/sawtooth_validator/gossip/gossip.py
ltavag/sawtooth-core
50659f23437b27ecd666d4cf129f812e6adaedc4
[ "Apache-2.0" ]
1
2018-04-24T11:42:36.000Z
2018-04-24T11:42:36.000Z
validator/sawtooth_validator/gossip/gossip.py
ltavag/sawtooth-core
50659f23437b27ecd666d4cf129f812e6adaedc4
[ "Apache-2.0" ]
null
null
null
validator/sawtooth_validator/gossip/gossip.py
ltavag/sawtooth-core
50659f23437b27ecd666d4cf129f812e6adaedc4
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ import logging import copy import time import random import os import binascii from threading import Lock from functools import partial from collections import namedtuple from enum import Enum from sawtooth_validator.concurrent.thread import InstrumentedThread from sawtooth_validator.protobuf.network_pb2 import DisconnectMessage from sawtooth_validator.protobuf.network_pb2 import GossipMessage from sawtooth_validator.protobuf.network_pb2 import GossipBatchByBatchIdRequest from sawtooth_validator.protobuf.network_pb2 import \ GossipBatchByTransactionIdRequest from sawtooth_validator.protobuf.network_pb2 import GossipBlockRequest from sawtooth_validator.protobuf import validator_pb2 from sawtooth_validator.protobuf.network_pb2 import PeerRegisterRequest from sawtooth_validator.protobuf.network_pb2 import PeerUnregisterRequest from sawtooth_validator.protobuf.network_pb2 import GetPeersRequest from sawtooth_validator.protobuf.network_pb2 import GetPeersResponse from sawtooth_validator.protobuf.network_pb2 import NetworkAcknowledgement from sawtooth_validator.exceptions import PeeringException LOGGER = logging.getLogger(__name__) class PeerStatus(Enum): CLOSED = 1 TEMP = 2 PEER = 3 class EndpointStatus(Enum): # Endpoint will be used for peering PEERING = 1 # Endpoint will be used to request peers TOPOLOGY = 2 EndpointInfo = namedtuple('EndpointInfo', ['status', 'time', "retry_threshold"]) StaticPeerInfo = namedtuple('StaticPeerInfo', ['time', 'retry_threshold', 'count']) INITIAL_RETRY_FREQUENCY = 10 MAXIMUM_RETRY_FREQUENCY = 300 MAXIMUM_STATIC_RETRY_FREQUENCY = 3600 MAXIMUM_STATIC_RETRIES = 24 TIME_TO_LIVE = 3 # This is the protocol version number. It should only be incremented when # there are changes to the network protocols, as well as only once per # release. NETWORK_PROTOCOL_VERSION = 1 class Gossip(object): def __init__(self, network, settings_cache, current_chain_head_func, current_root_func, endpoint=None, peering_mode='static', initial_seed_endpoints=None, initial_peer_endpoints=None, minimum_peer_connectivity=3, maximum_peer_connectivity=10, topology_check_frequency=1 ): """Constructor for the Gossip object. Gossip defines the overlay network above the lower level networking classes. Args: network (networking.Interconnect): Provides inbound and outbound network connections. settings_cache (state.SettingsCache): A cache for on chain settings. current_chain_head_func (function): returns the current chain head. current_root_func (function): returns the current state root hash for the current chain root. endpoint (str): The publically accessible zmq-style uri endpoint for this validator. peering_mode (str): The type of peering approach. Either 'static' or 'dynamic'. In 'static' mode, no attempted topology buildout occurs -- the validator only attempts to initiate peering connections with endpoints specified in the peer_list. 
In 'dynamic' mode, the validator will first attempt to initiate peering connections with endpoints specified in the peer_list and then attempt to do a topology buildout starting with peer lists obtained from endpoints in the seeds_list. In either mode, the validator will accept incoming peer requests up to max_peers. initial_seed_endpoints ([str]): A list of initial endpoints to attempt to connect and gather initial topology buildout information from. These are specified as zmq-compatible URIs (e.g. tcp://hostname:port). initial_peer_endpoints ([str]): A list of initial peer endpoints to attempt to connect and peer with. These are specified as zmq-compatible URIs (e.g. tcp://hostname:port). minimum_peer_connectivity (int): If the number of connected peers is below this threshold, the topology builder will continue to attempt to identify new candidate peers to connect with. maximum_peer_connectivity (int): The validator will reject new peer requests if the number of connected peers reaches this threshold. topology_check_frequency (int): The time in seconds between topology update checks. """ self._peering_mode = peering_mode self._lock = Lock() self._network = network self._endpoint = endpoint self._initial_seed_endpoints = initial_seed_endpoints \ if initial_seed_endpoints else [] self._initial_peer_endpoints = initial_peer_endpoints \ if initial_peer_endpoints else [] self._minimum_peer_connectivity = minimum_peer_connectivity self._maximum_peer_connectivity = maximum_peer_connectivity self._topology_check_frequency = topology_check_frequency self._settings_cache = settings_cache self._current_chain_head_func = current_chain_head_func self._current_root_func = current_root_func self._topology = None self._peers = {} def send_peers(self, connection_id): """Sends a message containing our peers to the connection identified by connection_id. Args: connection_id (str): A unique identifier which identifies an connection on the network server socket. """ with self._lock: # Needs to actually be the list of advertised endpoints of # our peers peer_endpoints = list(self._peers.values()) if self._endpoint: peer_endpoints.append(self._endpoint) peers_response = GetPeersResponse(peer_endpoints=peer_endpoints) try: # Send a one_way message because the connection will be closed # if this is a temp connection. self._network.send( validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE, peers_response.SerializeToString(), connection_id, one_way=True) except ValueError: LOGGER.debug("Connection disconnected: %s", connection_id) def add_candidate_peer_endpoints(self, peer_endpoints): """Adds candidate endpoints to the list of endpoints to attempt to peer with. Args: peer_endpoints ([str]): A list of public uri's which the validator can attempt to peer with. """ if self._topology: self._topology.add_candidate_peer_endpoints(peer_endpoints) else: LOGGER.debug("Could not add peer endpoints to topology. " "ConnectionManager does not exist.") def get_peers(self): """Returns a copy of the gossip peers. """ with self._lock: return copy.copy(self._peers) @property def endpoint(self): """Returns the validator's public endpoint. """ return self._endpoint def register_peer(self, connection_id, endpoint): """Registers a connected connection_id. Args: connection_id (str): A unique identifier which identifies an connection on the network server socket. 
endpoint (str): The publically reachable endpoint of the new peer """ with self._lock: if len(self._peers) < self._maximum_peer_connectivity: self._peers[connection_id] = endpoint self._topology.set_connection_status(connection_id, PeerStatus.PEER) LOGGER.debug("Added connection_id %s with endpoint %s, " "connected identities are now %s", connection_id, endpoint, self._peers) else: raise PeeringException( "At maximum configured number of peers: {} " "Rejecting peering request from {}.".format( self._maximum_peer_connectivity, endpoint)) def unregister_peer(self, connection_id): """Removes a connection_id from the registry. Args: connection_id (str): A unique identifier which identifies an connection on the network server socket. """ with self._lock: if connection_id in self._peers: del self._peers[connection_id] LOGGER.debug("Removed connection_id %s, " "connected identities are now %s", connection_id, self._peers) self._topology.set_connection_status(connection_id, PeerStatus.TEMP) else: LOGGER.warning("Connection unregister failed as connection " "was not registered: %s", connection_id) def get_time_to_live(self): time_to_live = \ self._settings_cache.get_setting( "sawtooth.gossip.time_to_live", self._current_root_func(), default_value=TIME_TO_LIVE ) return int(time_to_live) def broadcast_block(self, block, exclude=None, time_to_live=None): if time_to_live is None: time_to_live = self.get_time_to_live() gossip_message = GossipMessage( content_type=GossipMessage.BLOCK, content=block.SerializeToString(), time_to_live=time_to_live) self.broadcast( gossip_message, validator_pb2.Message.GOSSIP_MESSAGE, exclude) def broadcast_block_request(self, block_id): time_to_live = self.get_time_to_live() block_request = GossipBlockRequest( block_id=block_id, nonce=binascii.b2a_hex(os.urandom(16)), time_to_live=time_to_live) self.broadcast(block_request, validator_pb2.Message.GOSSIP_BLOCK_REQUEST) def send_block_request(self, block_id, connection_id): time_to_live = self.get_time_to_live() block_request = GossipBlockRequest( block_id=block_id, nonce=binascii.b2a_hex(os.urandom(16)), time_to_live=time_to_live) self.send(validator_pb2.Message.GOSSIP_BLOCK_REQUEST, block_request.SerializeToString(), connection_id, one_way=True) def broadcast_batch(self, batch, exclude=None, time_to_live=None): if time_to_live is None: time_to_live = self.get_time_to_live() gossip_message = GossipMessage( content_type=GossipMessage.BATCH, content=batch.SerializeToString(), time_to_live=time_to_live) self.broadcast( gossip_message, validator_pb2.Message.GOSSIP_MESSAGE, exclude) def broadcast_batch_by_transaction_id_request(self, transaction_ids): time_to_live = self.get_time_to_live() batch_request = GossipBatchByTransactionIdRequest( ids=transaction_ids, nonce=binascii.b2a_hex(os.urandom(16)), time_to_live=time_to_live) self.broadcast( batch_request, validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST) def broadcast_batch_by_batch_id_request(self, batch_id): time_to_live = self.get_time_to_live() batch_request = GossipBatchByBatchIdRequest( id=batch_id, nonce=binascii.b2a_hex(os.urandom(16)), time_to_live=time_to_live) self.broadcast( batch_request, validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST) def send(self, message_type, message, connection_id, one_way=False): """Sends a message via the network. Args: message_type (str): The type of the message. message (bytes): The message to be sent. connection_id (str): The connection to send it to. 
""" try: self._network.send(message_type, message, connection_id, one_way=one_way) except ValueError: LOGGER.debug("Connection %s is no longer valid. " "Removing from list of peers.", connection_id) if connection_id in self._peers: del self._peers[connection_id] def broadcast(self, gossip_message, message_type, exclude=None): """Broadcast gossip messages. Broadcast the message to all peers unless they are in the excluded list. Args: gossip_message: The message to be broadcast. message_type: Type of the message. exclude: A list of connection_ids that should be excluded from this broadcast. """ with self._lock: if exclude is None: exclude = [] for connection_id in self._peers.copy(): if connection_id not in exclude and \ self._network.is_connection_handshake_complete( connection_id): self.send( message_type, gossip_message.SerializeToString(), connection_id, one_way=True) def connect_success(self, connection_id): """ Notify topology that a connection has been properly authorized Args: connection_id: The connection id for the authorized connection. """ if self._topology: self._topology.connect_success(connection_id) def remove_temp_endpoint(self, endpoint): """ Remove temporary endpoints that never finished authorization. Args: endpoint: The endpoint that is not authorized to connect to the network. """ if self._topology: self._topology.remove_temp_endpoint(endpoint) def start(self): self._topology = ConnectionManager( gossip=self, network=self._network, endpoint=self._endpoint, current_chain_head_func=self._current_chain_head_func, initial_peer_endpoints=self._initial_peer_endpoints, initial_seed_endpoints=self._initial_seed_endpoints, peering_mode=self._peering_mode, min_peers=self._minimum_peer_connectivity, max_peers=self._maximum_peer_connectivity, check_frequency=self._topology_check_frequency) self._topology.start() def stop(self): for peer in self.get_peers(): request = PeerUnregisterRequest() try: self._network.send(validator_pb2.Message.GOSSIP_UNREGISTER, request.SerializeToString(), peer) except ValueError: pass if self._topology: self._topology.stop() class ConnectionManager(InstrumentedThread): def __init__(self, gossip, network, endpoint, current_chain_head_func, initial_peer_endpoints, initial_seed_endpoints, peering_mode, min_peers=3, max_peers=10, check_frequency=1): """Constructor for the ConnectionManager class. Args: gossip (gossip.Gossip): The gossip overlay network. network (network.Interconnect): The underlying network. endpoint (str): A zmq-style endpoint uri representing this validator's publically reachable endpoint. current_chain_head_func (function): Returns the current chain head. initial_peer_endpoints ([str]): A list of static peers to attempt to connect and peer with. initial_seed_endpoints ([str]): A list of endpoints to connect to and get candidate peer lists to attempt to reach min_peers threshold. peering_mode (str): Either 'static' or 'dynamic'. 'static' only connects to peers in initial_peer_endpoints. 'dynamic' connects to peers in initial_peer_endpoints and gets candidate peer lists from initial_seed_endpoints. min_peers (int): The minimum number of peers required to stop attempting candidate connections. max_peers (int): The maximum number of active peer connections to allow. check_frequency (int): How often to attempt dynamic connectivity. 
""" super().__init__(name="ConnectionManager") self._lock = Lock() self._stopped = False self._gossip = gossip self._network = network self._endpoint = endpoint self._current_chain_head_func = current_chain_head_func self._initial_peer_endpoints = initial_peer_endpoints self._initial_seed_endpoints = initial_seed_endpoints self._peering_mode = peering_mode self._min_peers = min_peers self._max_peers = max_peers self._check_frequency = check_frequency self._candidate_peer_endpoints = [] # Seconds to wait for messages to arrive self._response_duration = 2 self._connection_statuses = {} self._temp_endpoints = {} self._static_peer_status = {} def start(self): # First, attempt to connect to explicit peers for endpoint in self._initial_peer_endpoints: self._static_peer_status[endpoint] = \ StaticPeerInfo( time=0, retry_threshold=INITIAL_RETRY_FREQUENCY, count=0) super().start() def run(self): has_chain_head = self._current_chain_head_func() is not None while not self._stopped: try: if self._peering_mode == 'dynamic': self.retry_dynamic_peering() elif self._peering_mode == 'static': self.retry_static_peering() # This tests for a degenerate case where the node is connected # to peers, but at first connection no peer had a valid chain # head. Keep querying connected peers until a valid chain head # is received. has_chain_head = has_chain_head or \ self._current_chain_head_func() is not None if not has_chain_head: peered_connections = self._get_peered_connections() if peered_connections: LOGGER.debug( 'Have not received a chain head from peers. ' 'Requesting from %s', peered_connections) self._request_chain_head(peered_connections) time.sleep(self._check_frequency) except Exception: # pylint: disable=broad-except LOGGER.exception("Unhandled exception during peer refresh") def stop(self): self._stopped = True for connection_id in self._connection_statuses: try: if self._connection_statuses[connection_id] == \ PeerStatus.CLOSED: continue msg = DisconnectMessage() self._network.send( validator_pb2.Message.NETWORK_DISCONNECT, msg.SerializeToString(), connection_id) self._connection_statuses[connection_id] = PeerStatus.CLOSED except ValueError: # Connection has already been disconnected. pass def _get_peered_connections(self): peers = self._gossip.get_peers() return [conn_id for conn_id in peers if self._connection_statuses[conn_id] == PeerStatus.PEER] def _request_chain_head(self, peered_connections): """Request chain head from the given peer ids. Args: peered_connecions (:list:str): a list of peer connection ids where the requests will be sent. """ for conn_id in peered_connections: self._gossip.send_block_request("HEAD", conn_id) def retry_dynamic_peering(self): self._refresh_peer_list(self._gossip.get_peers()) peers = self._gossip.get_peers() peer_count = len(peers) if peer_count < self._min_peers: LOGGER.debug( "Number of peers (%s) below " "minimum peer threshold (%s). 
" "Doing topology search.", peer_count, self._min_peers) self._reset_candidate_peer_endpoints() self._refresh_peer_list(peers) # Cleans out any old connections that have disconnected self._refresh_connection_list() self._check_temp_endpoints() peers = self._gossip.get_peers() self._get_peers_of_peers(peers) self._get_peers_of_endpoints( peers, self._initial_seed_endpoints) # Wait for GOSSIP_GET_PEER_RESPONSE messages to arrive time.sleep(self._response_duration) peered_endpoints = list(peers.values()) with self._lock: unpeered_candidates = list( set(self._candidate_peer_endpoints) - set(peered_endpoints) - set([self._endpoint])) LOGGER.debug( "Peers are: %s. " "Unpeered candidates are: %s", peered_endpoints, unpeered_candidates) if unpeered_candidates: self._attempt_to_peer_with_endpoint( random.choice(unpeered_candidates)) def retry_static_peering(self): with self._lock: # Endpoints that have reached their retry count and should be # removed to_remove = [] for endpoint in self._initial_peer_endpoints: connection_id = None try: connection_id = \ self._network.get_connection_id_by_endpoint(endpoint) except KeyError: pass static_peer_info = self._static_peer_status[endpoint] if connection_id is not None: if connection_id in self._connection_statuses: # Endpoint is already a Peer if self._connection_statuses[connection_id] == \ PeerStatus.PEER: # reset static peering info self._static_peer_status[endpoint] = \ StaticPeerInfo( time=0, retry_threshold=INITIAL_RETRY_FREQUENCY, count=0) continue if (time.time() - static_peer_info.time) > \ static_peer_info.retry_threshold: LOGGER.debug("Endpoint has not completed authorization in " "%s seconds: %s", static_peer_info.retry_threshold, endpoint) if connection_id is not None: # If the connection exists remove it before retrying to # authorize. try: self._network.remove_connection(connection_id) except KeyError: pass if static_peer_info.retry_threshold == \ MAXIMUM_STATIC_RETRY_FREQUENCY: if static_peer_info.count >= MAXIMUM_STATIC_RETRIES: # Unable to peer with endpoint to_remove.append(endpoint) continue else: # At maximum retry threashold, increment count self._static_peer_status[endpoint] = \ StaticPeerInfo( time=time.time(), retry_threshold=min( static_peer_info.retry_threshold * 2, MAXIMUM_STATIC_RETRY_FREQUENCY), count=static_peer_info.count + 1) else: self._static_peer_status[endpoint] = \ StaticPeerInfo( time=time.time(), retry_threshold=min( static_peer_info.retry_threshold * 2, MAXIMUM_STATIC_RETRY_FREQUENCY), count=0) LOGGER.debug("attempting to peer with %s", endpoint) self._network.add_outbound_connection(endpoint) self._temp_endpoints[endpoint] = EndpointInfo( EndpointStatus.PEERING, time.time(), INITIAL_RETRY_FREQUENCY) for endpoint in to_remove: # Endpoints that have reached their retry count and should be # removed self._initial_peer_endpoints.remove(endpoint) del self._static_peer_status[endpoint] def add_candidate_peer_endpoints(self, peer_endpoints): """Adds candidate endpoints to the list of endpoints to attempt to peer with. Args: peer_endpoints ([str]): A list of public uri's which the validator can attempt to peer with. 
""" with self._lock: for endpoint in peer_endpoints: if endpoint not in self._candidate_peer_endpoints: self._candidate_peer_endpoints.append(endpoint) def set_connection_status(self, connection_id, status): self._connection_statuses[connection_id] = status def remove_temp_endpoint(self, endpoint): with self._lock: if endpoint in self._temp_endpoints: del self._temp_endpoints[endpoint] def _check_temp_endpoints(self): with self._lock: for endpoint in self._temp_endpoints: endpoint_info = self._temp_endpoints[endpoint] if (time.time() - endpoint_info.time) > \ endpoint_info.retry_threshold: LOGGER.debug("Endpoint has not completed authorization in " "%s seconds: %s", endpoint_info.retry_threshold, endpoint) try: # If the connection exists remove it before retrying to # authorize. If the connection does not exist, a # KeyError will be thrown. conn_id = \ self._network.get_connection_id_by_endpoint( endpoint) self._network.remove_connection(conn_id) except KeyError: pass self._network.add_outbound_connection(endpoint) self._temp_endpoints[endpoint] = EndpointInfo( endpoint_info.status, time.time(), min(endpoint_info.retry_threshold * 2, MAXIMUM_RETRY_FREQUENCY)) def _refresh_peer_list(self, peers): for conn_id in peers: try: self._network.get_connection_id_by_endpoint( peers[conn_id]) except KeyError: LOGGER.debug("removing peer %s because " "connection went away", peers[conn_id]) self._gossip.unregister_peer(conn_id) if conn_id in self._connection_statuses: del self._connection_statuses[conn_id] def _refresh_connection_list(self): with self._lock: closed_connections = [] for connection_id in self._connection_statuses: if not self._network.has_connection(connection_id): closed_connections.append(connection_id) for connection_id in closed_connections: del self._connection_statuses[connection_id] def _get_peers_of_peers(self, peers): get_peers_request = GetPeersRequest() for conn_id in peers: try: self._network.send( validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST, get_peers_request.SerializeToString(), conn_id) except ValueError: LOGGER.debug("Peer disconnected: %s", conn_id) def _get_peers_of_endpoints(self, peers, endpoints): get_peers_request = GetPeersRequest() for endpoint in endpoints: conn_id = None try: conn_id = self._network.get_connection_id_by_endpoint( endpoint) except KeyError: # If the connection does not exist, send a connection request with self._lock: if endpoint in self._temp_endpoints: del self._temp_endpoints[endpoint] self._temp_endpoints[endpoint] = EndpointInfo( EndpointStatus.TOPOLOGY, time.time(), INITIAL_RETRY_FREQUENCY) self._network.add_outbound_connection(endpoint) # If the connection does exist, request peers. if conn_id is not None: if not self._network.is_connection_handshake_complete(conn_id): # has not finished the authorization (trust/challenge) # process yet. 
continue elif conn_id in peers: # connected and peered - we've already sent peer request continue else: # connected but not peered if endpoint in self._temp_endpoints: # Endpoint is not yet authorized, do not request peers continue try: self._network.send( validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST, get_peers_request.SerializeToString(), conn_id) except ValueError: LOGGER.debug("Connection disconnected: %s", conn_id) def _attempt_to_peer_with_endpoint(self, endpoint): LOGGER.debug("Attempting to connect/peer with %s", endpoint) # check if the connection exists, if it does - send, # otherwise create it try: connection_id = \ self._network.get_connection_id_by_endpoint( endpoint) register_request = PeerRegisterRequest( endpoint=self._endpoint, protocol_version=NETWORK_PROTOCOL_VERSION) self._network.send( validator_pb2.Message.GOSSIP_REGISTER, register_request.SerializeToString(), connection_id, callback=partial( self._peer_callback, endpoint=endpoint, connection_id=connection_id)) except KeyError: # if the connection uri wasn't found in the network's # connections, it raises a KeyError and we need to add # a new outbound connection with self._lock: self._temp_endpoints[endpoint] = EndpointInfo( EndpointStatus.PEERING, time.time(), INITIAL_RETRY_FREQUENCY) self._network.add_outbound_connection(endpoint) def _reset_candidate_peer_endpoints(self): with self._lock: self._candidate_peer_endpoints = [] def _peer_callback(self, request, result, connection_id, endpoint=None): with self._lock: ack = NetworkAcknowledgement() ack.ParseFromString(result.content) if ack.status == ack.ERROR: LOGGER.debug("Peering request to %s was NOT successful", connection_id) self._remove_temporary_connection(connection_id) elif ack.status == ack.OK: LOGGER.debug("Peering request to %s was successful", connection_id) if endpoint: try: self._gossip.register_peer(connection_id, endpoint) self._connection_statuses[connection_id] = \ PeerStatus.PEER self._gossip.send_block_request("HEAD", connection_id) except PeeringException as e: # Remove unsuccessful peer LOGGER.warning('Unable to successfully peer with ' 'connection_id: %s, due to %s', connection_id, str(e)) self._remove_temporary_connection(connection_id) else: LOGGER.debug("Cannot register peer with no endpoint for " "connection_id: %s", connection_id) self._remove_temporary_connection(connection_id) def _remove_temporary_connection(self, connection_id): status = self._connection_statuses.get(connection_id) if status == PeerStatus.TEMP: LOGGER.debug("Closing connection to %s", connection_id) msg = DisconnectMessage() try: self._network.send(validator_pb2.Message.NETWORK_DISCONNECT, msg.SerializeToString(), connection_id) except ValueError: pass del self._connection_statuses[connection_id] self._network.remove_connection(connection_id) elif status == PeerStatus.PEER: LOGGER.debug("Connection close request for peer ignored: %s", connection_id) elif status is None: LOGGER.debug("Connection close request for unknown connection " "ignored: %s", connection_id) def connect_success(self, connection_id): """ Check to see if the successful connection is meant to be peered with. If not, it should be used to get the peers from the endpoint. 
""" endpoint = self._network.connection_id_to_endpoint(connection_id) endpoint_info = self._temp_endpoints.get(endpoint) LOGGER.debug("Endpoint has completed authorization: %s (id: %s)", endpoint, connection_id) if endpoint_info is None: LOGGER.debug("Received unknown endpoint: %s", endpoint) elif endpoint_info.status == EndpointStatus.PEERING: self._connect_success_peering(connection_id, endpoint) elif endpoint_info.status == EndpointStatus.TOPOLOGY: self._connect_success_topology(connection_id) else: LOGGER.debug("Endpoint has unknown status: %s", endpoint) with self._lock: if endpoint in self._temp_endpoints: del self._temp_endpoints[endpoint] def _connect_success_peering(self, connection_id, endpoint): LOGGER.debug("Connection to %s succeeded", connection_id) register_request = PeerRegisterRequest( endpoint=self._endpoint, protocol_version=NETWORK_PROTOCOL_VERSION) self._connection_statuses[connection_id] = PeerStatus.TEMP try: self._network.send( validator_pb2.Message.GOSSIP_REGISTER, register_request.SerializeToString(), connection_id, callback=partial( self._peer_callback, connection_id=connection_id, endpoint=endpoint)) except ValueError: LOGGER.debug("Connection disconnected: %s", connection_id) def _connect_success_topology(self, connection_id): LOGGER.debug("Connection to %s succeeded for topology request", connection_id) self._connection_statuses[connection_id] = PeerStatus.TEMP get_peers_request = GetPeersRequest() def callback(request, result): # request, result are ignored, but required by the callback self._remove_temporary_connection(connection_id) try: self._network.send( validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST, get_peers_request.SerializeToString(), connection_id, callback=callback) except ValueError: LOGGER.debug("Connection disconnected: %s", connection_id)
41.664894
80
0.578808
import logging import copy import time import random import os import binascii from threading import Lock from functools import partial from collections import namedtuple from enum import Enum from sawtooth_validator.concurrent.thread import InstrumentedThread from sawtooth_validator.protobuf.network_pb2 import DisconnectMessage from sawtooth_validator.protobuf.network_pb2 import GossipMessage from sawtooth_validator.protobuf.network_pb2 import GossipBatchByBatchIdRequest from sawtooth_validator.protobuf.network_pb2 import \ GossipBatchByTransactionIdRequest from sawtooth_validator.protobuf.network_pb2 import GossipBlockRequest from sawtooth_validator.protobuf import validator_pb2 from sawtooth_validator.protobuf.network_pb2 import PeerRegisterRequest from sawtooth_validator.protobuf.network_pb2 import PeerUnregisterRequest from sawtooth_validator.protobuf.network_pb2 import GetPeersRequest from sawtooth_validator.protobuf.network_pb2 import GetPeersResponse from sawtooth_validator.protobuf.network_pb2 import NetworkAcknowledgement from sawtooth_validator.exceptions import PeeringException LOGGER = logging.getLogger(__name__) class PeerStatus(Enum): CLOSED = 1 TEMP = 2 PEER = 3 class EndpointStatus(Enum): PEERING = 1 TOPOLOGY = 2 EndpointInfo = namedtuple('EndpointInfo', ['status', 'time', "retry_threshold"]) StaticPeerInfo = namedtuple('StaticPeerInfo', ['time', 'retry_threshold', 'count']) INITIAL_RETRY_FREQUENCY = 10 MAXIMUM_RETRY_FREQUENCY = 300 MAXIMUM_STATIC_RETRY_FREQUENCY = 3600 MAXIMUM_STATIC_RETRIES = 24 TIME_TO_LIVE = 3 NETWORK_PROTOCOL_VERSION = 1 class Gossip(object): def __init__(self, network, settings_cache, current_chain_head_func, current_root_func, endpoint=None, peering_mode='static', initial_seed_endpoints=None, initial_peer_endpoints=None, minimum_peer_connectivity=3, maximum_peer_connectivity=10, topology_check_frequency=1 ): self._peering_mode = peering_mode self._lock = Lock() self._network = network self._endpoint = endpoint self._initial_seed_endpoints = initial_seed_endpoints \ if initial_seed_endpoints else [] self._initial_peer_endpoints = initial_peer_endpoints \ if initial_peer_endpoints else [] self._minimum_peer_connectivity = minimum_peer_connectivity self._maximum_peer_connectivity = maximum_peer_connectivity self._topology_check_frequency = topology_check_frequency self._settings_cache = settings_cache self._current_chain_head_func = current_chain_head_func self._current_root_func = current_root_func self._topology = None self._peers = {} def send_peers(self, connection_id): with self._lock: peer_endpoints = list(self._peers.values()) if self._endpoint: peer_endpoints.append(self._endpoint) peers_response = GetPeersResponse(peer_endpoints=peer_endpoints) try: self._network.send( validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE, peers_response.SerializeToString(), connection_id, one_way=True) except ValueError: LOGGER.debug("Connection disconnected: %s", connection_id) def add_candidate_peer_endpoints(self, peer_endpoints): if self._topology: self._topology.add_candidate_peer_endpoints(peer_endpoints) else: LOGGER.debug("Could not add peer endpoints to topology. 
" "ConnectionManager does not exist.") def get_peers(self): with self._lock: return copy.copy(self._peers) @property def endpoint(self): return self._endpoint def register_peer(self, connection_id, endpoint): with self._lock: if len(self._peers) < self._maximum_peer_connectivity: self._peers[connection_id] = endpoint self._topology.set_connection_status(connection_id, PeerStatus.PEER) LOGGER.debug("Added connection_id %s with endpoint %s, " "connected identities are now %s", connection_id, endpoint, self._peers) else: raise PeeringException( "At maximum configured number of peers: {} " "Rejecting peering request from {}.".format( self._maximum_peer_connectivity, endpoint)) def unregister_peer(self, connection_id): with self._lock: if connection_id in self._peers: del self._peers[connection_id] LOGGER.debug("Removed connection_id %s, " "connected identities are now %s", connection_id, self._peers) self._topology.set_connection_status(connection_id, PeerStatus.TEMP) else: LOGGER.warning("Connection unregister failed as connection " "was not registered: %s", connection_id) def get_time_to_live(self): time_to_live = \ self._settings_cache.get_setting( "sawtooth.gossip.time_to_live", self._current_root_func(), default_value=TIME_TO_LIVE ) return int(time_to_live) def broadcast_block(self, block, exclude=None, time_to_live=None): if time_to_live is None: time_to_live = self.get_time_to_live() gossip_message = GossipMessage( content_type=GossipMessage.BLOCK, content=block.SerializeToString(), time_to_live=time_to_live) self.broadcast( gossip_message, validator_pb2.Message.GOSSIP_MESSAGE, exclude) def broadcast_block_request(self, block_id): time_to_live = self.get_time_to_live() block_request = GossipBlockRequest( block_id=block_id, nonce=binascii.b2a_hex(os.urandom(16)), time_to_live=time_to_live) self.broadcast(block_request, validator_pb2.Message.GOSSIP_BLOCK_REQUEST) def send_block_request(self, block_id, connection_id): time_to_live = self.get_time_to_live() block_request = GossipBlockRequest( block_id=block_id, nonce=binascii.b2a_hex(os.urandom(16)), time_to_live=time_to_live) self.send(validator_pb2.Message.GOSSIP_BLOCK_REQUEST, block_request.SerializeToString(), connection_id, one_way=True) def broadcast_batch(self, batch, exclude=None, time_to_live=None): if time_to_live is None: time_to_live = self.get_time_to_live() gossip_message = GossipMessage( content_type=GossipMessage.BATCH, content=batch.SerializeToString(), time_to_live=time_to_live) self.broadcast( gossip_message, validator_pb2.Message.GOSSIP_MESSAGE, exclude) def broadcast_batch_by_transaction_id_request(self, transaction_ids): time_to_live = self.get_time_to_live() batch_request = GossipBatchByTransactionIdRequest( ids=transaction_ids, nonce=binascii.b2a_hex(os.urandom(16)), time_to_live=time_to_live) self.broadcast( batch_request, validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST) def broadcast_batch_by_batch_id_request(self, batch_id): time_to_live = self.get_time_to_live() batch_request = GossipBatchByBatchIdRequest( id=batch_id, nonce=binascii.b2a_hex(os.urandom(16)), time_to_live=time_to_live) self.broadcast( batch_request, validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST) def send(self, message_type, message, connection_id, one_way=False): try: self._network.send(message_type, message, connection_id, one_way=one_way) except ValueError: LOGGER.debug("Connection %s is no longer valid. 
" "Removing from list of peers.", connection_id) if connection_id in self._peers: del self._peers[connection_id] def broadcast(self, gossip_message, message_type, exclude=None): with self._lock: if exclude is None: exclude = [] for connection_id in self._peers.copy(): if connection_id not in exclude and \ self._network.is_connection_handshake_complete( connection_id): self.send( message_type, gossip_message.SerializeToString(), connection_id, one_way=True) def connect_success(self, connection_id): if self._topology: self._topology.connect_success(connection_id) def remove_temp_endpoint(self, endpoint): if self._topology: self._topology.remove_temp_endpoint(endpoint) def start(self): self._topology = ConnectionManager( gossip=self, network=self._network, endpoint=self._endpoint, current_chain_head_func=self._current_chain_head_func, initial_peer_endpoints=self._initial_peer_endpoints, initial_seed_endpoints=self._initial_seed_endpoints, peering_mode=self._peering_mode, min_peers=self._minimum_peer_connectivity, max_peers=self._maximum_peer_connectivity, check_frequency=self._topology_check_frequency) self._topology.start() def stop(self): for peer in self.get_peers(): request = PeerUnregisterRequest() try: self._network.send(validator_pb2.Message.GOSSIP_UNREGISTER, request.SerializeToString(), peer) except ValueError: pass if self._topology: self._topology.stop() class ConnectionManager(InstrumentedThread): def __init__(self, gossip, network, endpoint, current_chain_head_func, initial_peer_endpoints, initial_seed_endpoints, peering_mode, min_peers=3, max_peers=10, check_frequency=1): super().__init__(name="ConnectionManager") self._lock = Lock() self._stopped = False self._gossip = gossip self._network = network self._endpoint = endpoint self._current_chain_head_func = current_chain_head_func self._initial_peer_endpoints = initial_peer_endpoints self._initial_seed_endpoints = initial_seed_endpoints self._peering_mode = peering_mode self._min_peers = min_peers self._max_peers = max_peers self._check_frequency = check_frequency self._candidate_peer_endpoints = [] self._response_duration = 2 self._connection_statuses = {} self._temp_endpoints = {} self._static_peer_status = {} def start(self): for endpoint in self._initial_peer_endpoints: self._static_peer_status[endpoint] = \ StaticPeerInfo( time=0, retry_threshold=INITIAL_RETRY_FREQUENCY, count=0) super().start() def run(self): has_chain_head = self._current_chain_head_func() is not None while not self._stopped: try: if self._peering_mode == 'dynamic': self.retry_dynamic_peering() elif self._peering_mode == 'static': self.retry_static_peering() has_chain_head = has_chain_head or \ self._current_chain_head_func() is not None if not has_chain_head: peered_connections = self._get_peered_connections() if peered_connections: LOGGER.debug( 'Have not received a chain head from peers. 
' 'Requesting from %s', peered_connections) self._request_chain_head(peered_connections) time.sleep(self._check_frequency) except Exception: LOGGER.exception("Unhandled exception during peer refresh") def stop(self): self._stopped = True for connection_id in self._connection_statuses: try: if self._connection_statuses[connection_id] == \ PeerStatus.CLOSED: continue msg = DisconnectMessage() self._network.send( validator_pb2.Message.NETWORK_DISCONNECT, msg.SerializeToString(), connection_id) self._connection_statuses[connection_id] = PeerStatus.CLOSED except ValueError: pass def _get_peered_connections(self): peers = self._gossip.get_peers() return [conn_id for conn_id in peers if self._connection_statuses[conn_id] == PeerStatus.PEER] def _request_chain_head(self, peered_connections): for conn_id in peered_connections: self._gossip.send_block_request("HEAD", conn_id) def retry_dynamic_peering(self): self._refresh_peer_list(self._gossip.get_peers()) peers = self._gossip.get_peers() peer_count = len(peers) if peer_count < self._min_peers: LOGGER.debug( "Number of peers (%s) below " "minimum peer threshold (%s). " "Doing topology search.", peer_count, self._min_peers) self._reset_candidate_peer_endpoints() self._refresh_peer_list(peers) self._refresh_connection_list() self._check_temp_endpoints() peers = self._gossip.get_peers() self._get_peers_of_peers(peers) self._get_peers_of_endpoints( peers, self._initial_seed_endpoints) time.sleep(self._response_duration) peered_endpoints = list(peers.values()) with self._lock: unpeered_candidates = list( set(self._candidate_peer_endpoints) - set(peered_endpoints) - set([self._endpoint])) LOGGER.debug( "Peers are: %s. " "Unpeered candidates are: %s", peered_endpoints, unpeered_candidates) if unpeered_candidates: self._attempt_to_peer_with_endpoint( random.choice(unpeered_candidates)) def retry_static_peering(self): with self._lock: to_remove = [] for endpoint in self._initial_peer_endpoints: connection_id = None try: connection_id = \ self._network.get_connection_id_by_endpoint(endpoint) except KeyError: pass static_peer_info = self._static_peer_status[endpoint] if connection_id is not None: if connection_id in self._connection_statuses: if self._connection_statuses[connection_id] == \ PeerStatus.PEER: self._static_peer_status[endpoint] = \ StaticPeerInfo( time=0, retry_threshold=INITIAL_RETRY_FREQUENCY, count=0) continue if (time.time() - static_peer_info.time) > \ static_peer_info.retry_threshold: LOGGER.debug("Endpoint has not completed authorization in " "%s seconds: %s", static_peer_info.retry_threshold, endpoint) if connection_id is not None: try: self._network.remove_connection(connection_id) except KeyError: pass if static_peer_info.retry_threshold == \ MAXIMUM_STATIC_RETRY_FREQUENCY: if static_peer_info.count >= MAXIMUM_STATIC_RETRIES: to_remove.append(endpoint) continue else: self._static_peer_status[endpoint] = \ StaticPeerInfo( time=time.time(), retry_threshold=min( static_peer_info.retry_threshold * 2, MAXIMUM_STATIC_RETRY_FREQUENCY), count=static_peer_info.count + 1) else: self._static_peer_status[endpoint] = \ StaticPeerInfo( time=time.time(), retry_threshold=min( static_peer_info.retry_threshold * 2, MAXIMUM_STATIC_RETRY_FREQUENCY), count=0) LOGGER.debug("attempting to peer with %s", endpoint) self._network.add_outbound_connection(endpoint) self._temp_endpoints[endpoint] = EndpointInfo( EndpointStatus.PEERING, time.time(), INITIAL_RETRY_FREQUENCY) for endpoint in to_remove: self._initial_peer_endpoints.remove(endpoint) del 
self._static_peer_status[endpoint] def add_candidate_peer_endpoints(self, peer_endpoints): with self._lock: for endpoint in peer_endpoints: if endpoint not in self._candidate_peer_endpoints: self._candidate_peer_endpoints.append(endpoint) def set_connection_status(self, connection_id, status): self._connection_statuses[connection_id] = status def remove_temp_endpoint(self, endpoint): with self._lock: if endpoint in self._temp_endpoints: del self._temp_endpoints[endpoint] def _check_temp_endpoints(self): with self._lock: for endpoint in self._temp_endpoints: endpoint_info = self._temp_endpoints[endpoint] if (time.time() - endpoint_info.time) > \ endpoint_info.retry_threshold: LOGGER.debug("Endpoint has not completed authorization in " "%s seconds: %s", endpoint_info.retry_threshold, endpoint) try: conn_id = \ self._network.get_connection_id_by_endpoint( endpoint) self._network.remove_connection(conn_id) except KeyError: pass self._network.add_outbound_connection(endpoint) self._temp_endpoints[endpoint] = EndpointInfo( endpoint_info.status, time.time(), min(endpoint_info.retry_threshold * 2, MAXIMUM_RETRY_FREQUENCY)) def _refresh_peer_list(self, peers): for conn_id in peers: try: self._network.get_connection_id_by_endpoint( peers[conn_id]) except KeyError: LOGGER.debug("removing peer %s because " "connection went away", peers[conn_id]) self._gossip.unregister_peer(conn_id) if conn_id in self._connection_statuses: del self._connection_statuses[conn_id] def _refresh_connection_list(self): with self._lock: closed_connections = [] for connection_id in self._connection_statuses: if not self._network.has_connection(connection_id): closed_connections.append(connection_id) for connection_id in closed_connections: del self._connection_statuses[connection_id] def _get_peers_of_peers(self, peers): get_peers_request = GetPeersRequest() for conn_id in peers: try: self._network.send( validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST, get_peers_request.SerializeToString(), conn_id) except ValueError: LOGGER.debug("Peer disconnected: %s", conn_id) def _get_peers_of_endpoints(self, peers, endpoints): get_peers_request = GetPeersRequest() for endpoint in endpoints: conn_id = None try: conn_id = self._network.get_connection_id_by_endpoint( endpoint) except KeyError: with self._lock: if endpoint in self._temp_endpoints: del self._temp_endpoints[endpoint] self._temp_endpoints[endpoint] = EndpointInfo( EndpointStatus.TOPOLOGY, time.time(), INITIAL_RETRY_FREQUENCY) self._network.add_outbound_connection(endpoint) if conn_id is not None: if not self._network.is_connection_handshake_complete(conn_id): continue elif conn_id in peers: continue else: # connected but not peered if endpoint in self._temp_endpoints: # Endpoint is not yet authorized, do not request peers continue try: self._network.send( validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST, get_peers_request.SerializeToString(), conn_id) except ValueError: LOGGER.debug("Connection disconnected: %s", conn_id) def _attempt_to_peer_with_endpoint(self, endpoint): LOGGER.debug("Attempting to connect/peer with %s", endpoint) # check if the connection exists, if it does - send, # otherwise create it try: connection_id = \ self._network.get_connection_id_by_endpoint( endpoint) register_request = PeerRegisterRequest( endpoint=self._endpoint, protocol_version=NETWORK_PROTOCOL_VERSION) self._network.send( validator_pb2.Message.GOSSIP_REGISTER, register_request.SerializeToString(), connection_id, callback=partial( self._peer_callback, endpoint=endpoint, 
connection_id=connection_id)) except KeyError: # if the connection uri wasn't found in the network's # connections, it raises a KeyError and we need to add # a new outbound connection with self._lock: self._temp_endpoints[endpoint] = EndpointInfo( EndpointStatus.PEERING, time.time(), INITIAL_RETRY_FREQUENCY) self._network.add_outbound_connection(endpoint) def _reset_candidate_peer_endpoints(self): with self._lock: self._candidate_peer_endpoints = [] def _peer_callback(self, request, result, connection_id, endpoint=None): with self._lock: ack = NetworkAcknowledgement() ack.ParseFromString(result.content) if ack.status == ack.ERROR: LOGGER.debug("Peering request to %s was NOT successful", connection_id) self._remove_temporary_connection(connection_id) elif ack.status == ack.OK: LOGGER.debug("Peering request to %s was successful", connection_id) if endpoint: try: self._gossip.register_peer(connection_id, endpoint) self._connection_statuses[connection_id] = \ PeerStatus.PEER self._gossip.send_block_request("HEAD", connection_id) except PeeringException as e: # Remove unsuccessful peer LOGGER.warning('Unable to successfully peer with ' 'connection_id: %s, due to %s', connection_id, str(e)) self._remove_temporary_connection(connection_id) else: LOGGER.debug("Cannot register peer with no endpoint for " "connection_id: %s", connection_id) self._remove_temporary_connection(connection_id) def _remove_temporary_connection(self, connection_id): status = self._connection_statuses.get(connection_id) if status == PeerStatus.TEMP: LOGGER.debug("Closing connection to %s", connection_id) msg = DisconnectMessage() try: self._network.send(validator_pb2.Message.NETWORK_DISCONNECT, msg.SerializeToString(), connection_id) except ValueError: pass del self._connection_statuses[connection_id] self._network.remove_connection(connection_id) elif status == PeerStatus.PEER: LOGGER.debug("Connection close request for peer ignored: %s", connection_id) elif status is None: LOGGER.debug("Connection close request for unknown connection " "ignored: %s", connection_id) def connect_success(self, connection_id): endpoint = self._network.connection_id_to_endpoint(connection_id) endpoint_info = self._temp_endpoints.get(endpoint) LOGGER.debug("Endpoint has completed authorization: %s (id: %s)", endpoint, connection_id) if endpoint_info is None: LOGGER.debug("Received unknown endpoint: %s", endpoint) elif endpoint_info.status == EndpointStatus.PEERING: self._connect_success_peering(connection_id, endpoint) elif endpoint_info.status == EndpointStatus.TOPOLOGY: self._connect_success_topology(connection_id) else: LOGGER.debug("Endpoint has unknown status: %s", endpoint) with self._lock: if endpoint in self._temp_endpoints: del self._temp_endpoints[endpoint] def _connect_success_peering(self, connection_id, endpoint): LOGGER.debug("Connection to %s succeeded", connection_id) register_request = PeerRegisterRequest( endpoint=self._endpoint, protocol_version=NETWORK_PROTOCOL_VERSION) self._connection_statuses[connection_id] = PeerStatus.TEMP try: self._network.send( validator_pb2.Message.GOSSIP_REGISTER, register_request.SerializeToString(), connection_id, callback=partial( self._peer_callback, connection_id=connection_id, endpoint=endpoint)) except ValueError: LOGGER.debug("Connection disconnected: %s", connection_id) def _connect_success_topology(self, connection_id): LOGGER.debug("Connection to %s succeeded for topology request", connection_id) self._connection_statuses[connection_id] = PeerStatus.TEMP get_peers_request = 
GetPeersRequest() def callback(request, result): # request, result are ignored, but required by the callback self._remove_temporary_connection(connection_id) try: self._network.send( validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST, get_peers_request.SerializeToString(), connection_id, callback=callback) except ValueError: LOGGER.debug("Connection disconnected: %s", connection_id)
true
true
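The ConnectionManager in the gossip record retries endpoints that have not completed authorization with an exponential backoff: each failure doubles retry_threshold, capped at MAXIMUM_RETRY_FREQUENCY (MAXIMUM_STATIC_RETRY_FREQUENCY for static peers). A minimal sketch of that schedule, reusing the constants defined in the record:

INITIAL_RETRY_FREQUENCY = 10      # seconds, as in the record
MAXIMUM_RETRY_FREQUENCY = 300

def next_retry_threshold(current):
    # each failed authorization doubles the wait, capped at the maximum
    return min(current * 2, MAXIMUM_RETRY_FREQUENCY)

threshold = INITIAL_RETRY_FREQUENCY
schedule = []
for _ in range(6):
    schedule.append(threshold)
    threshold = next_retry_threshold(threshold)
print(schedule)  # [10, 20, 40, 80, 160, 300]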
f7248bd68fe7de142bd9557e71df9eec370b4208
4,604
py
Python
colossalai/nn/optimizer/lamb.py
xdjiangkai/ColossalAI
4a3d3446b04065fa1c89b78cba673e96115c6325
[ "Apache-2.0" ]
1
2022-03-12T04:49:19.000Z
2022-03-12T04:49:19.000Z
colossalai/nn/optimizer/lamb.py
xdjiangkai/ColossalAI
4a3d3446b04065fa1c89b78cba673e96115c6325
[ "Apache-2.0" ]
null
null
null
colossalai/nn/optimizer/lamb.py
xdjiangkai/ColossalAI
4a3d3446b04065fa1c89b78cba673e96115c6325
[ "Apache-2.0" ]
1
2022-01-06T17:16:32.000Z
2022-01-06T17:16:32.000Z
""" Adapted from the pytorch-lamb library at https://github.com/cybertronai/pytorch-lamb """ import torch from torch.optim import Optimizer from colossalai.registry import OPTIMIZERS @OPTIMIZERS.register_module class Lamb(Optimizer): r"""Implements Lamb algorithm. It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) adam (bool, optional): always use trust ratio = 1, which turns this into Adam. Useful for comparison purposes. .. _Large Batch Optimization for Deep Learning\: Training BERT in 76 minutes: https://arxiv.org/abs/1904.00962 """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0, adam=False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError( "Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError( "Invalid beta parameter at index 1: {}".format(betas[1])) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) self.adam = adam super(Lamb, self).__init__(params, defaults) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError( 'Lamb does not support sparse gradients, consider SparseAdam instad.') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 # Decay the first and second moment running average coefficient # m_t exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # v_t exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # Paper v3 does not use debiasing. # bias_correction1 = 1 - beta1 ** state['step'] # bias_correction2 = 1 - beta2 ** state['step'] # Apply bias to lr to avoid broadcast. # * math.sqrt(bias_correction2) / bias_correction1 step_size = group['lr'] weight_norm = p.data.pow(2).sum().sqrt() adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps']) if group['weight_decay'] != 0: adam_step.add_(p.data, alpha=group['weight_decay']) adam_norm = adam_step.pow(2).sum().sqrt() if weight_norm == 0 or adam_norm == 0: trust_ratio = 1 else: trust_ratio = weight_norm / adam_norm state['weight_norm'] = weight_norm state['adam_norm'] = adam_norm state['trust_ratio'] = trust_ratio if self.adam: trust_ratio = 1 p.data.add_(adam_step, alpha=-step_size * trust_ratio) return loss
39.350427
103
0.553649
import torch from torch.optim import Optimizer from colossalai.registry import OPTIMIZERS @OPTIMIZERS.register_module class Lamb(Optimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0, adam=False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError( "Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError( "Invalid beta parameter at index 1: {}".format(betas[1])) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) self.adam = adam super(Lamb, self).__init__(params, defaults) def step(self, closure=None): loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError( 'Lamb does not support sparse gradients, consider SparseAdam instad.') state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p.data) state['exp_avg_sq'] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) step_size = group['lr'] weight_norm = p.data.pow(2).sum().sqrt() adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps']) if group['weight_decay'] != 0: adam_step.add_(p.data, alpha=group['weight_decay']) adam_norm = adam_step.pow(2).sum().sqrt() if weight_norm == 0 or adam_norm == 0: trust_ratio = 1 else: trust_ratio = weight_norm / adam_norm state['weight_norm'] = weight_norm state['adam_norm'] = adam_norm state['trust_ratio'] = trust_ratio if self.adam: trust_ratio = 1 p.data.add_(adam_step, alpha=-step_size * trust_ratio) return loss
true
true
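The Lamb.step() method in the record above rescales the Adam-style update of each parameter tensor by a layer-wise trust ratio ||w|| / ||adam_step||, falling back to 1 when either norm is zero (or when adam=True). A minimal sketch of that computation on NumPy arrays; this illustrates the formula in the record and is not a drop-in replacement for the torch optimizer:

import numpy as np

def lamb_update(param, exp_avg, exp_avg_sq, lr=1e-3, eps=1e-6, weight_decay=0.0):
    # Adam-style step direction from the running first/second moments
    adam_step = exp_avg / (np.sqrt(exp_avg_sq) + eps)
    if weight_decay != 0.0:
        adam_step = adam_step + weight_decay * param
    weight_norm = np.sqrt(np.sum(param ** 2))
    adam_norm = np.sqrt(np.sum(adam_step ** 2))
    # layer-wise trust ratio; fall back to 1 when either norm vanishes
    trust_ratio = 1.0 if weight_norm == 0 or adam_norm == 0 else weight_norm / adam_norm
    return param - lr * trust_ratio * adam_step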
f7248e344028eb2f0e09ee718bbf90134b69c45e
29,546
py
Python
pyqubo/array.py
OpenJij/pyqubo
47190d3391c83c1c84636ab8f8bff67c8f935dc0
[ "Apache-2.0" ]
null
null
null
pyqubo/array.py
OpenJij/pyqubo
47190d3391c83c1c84636ab8f8bff67c8f935dc0
[ "Apache-2.0" ]
null
null
null
pyqubo/array.py
OpenJij/pyqubo
47190d3391c83c1c84636ab8f8bff67c8f935dc0
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 Recruit Communications Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .core import Spin, Binary, Express import dimod from dimod.decorators import vartype_argument import numpy as np from operator import mul, add from six.moves import reduce class Array: """Multi-dimensional array. Args: bit_list (list/:class:`numpy.ndarray`): The object from which a new array is created. Accepted input: * (Nested) list of :class:`Express`, :class:`Array`, int or float. * numpy.ndarray Attributes: shape (tuple[int]): Shape of this array. Example: Create a new array with Binary. >>> from pyqubo import Array, Binary >>> Array.create('x', shape=(2, 2), vartype='BINARY') Array([[Binary(x[0][0]), Binary(x[0][1])], [Binary(x[1][0]), Binary(x[1][1])]]) Create a new array from a nested list of :class:`Express`. >>> array = Array([[Binary('x0'), Binary('x1')], [Binary('x2'), Binary('x3')]]) >>> array Array([[Binary(x0), Binary(x1)], [Binary(x2), Binary(x3)]]) Get the shape of the array. >>> array.shape (2, 2) Access an element with index. >>> array[0, 0] # = array[(0, 0)] Binary(x0) Use slice ":" to select a subset of the array. >>> array[:, 1] # = array[(slice(None), 1)] Array([Binary(x1), Binary(x3)]) >>> sum(array[:, 1]) (Binary(x1)+Binary(x3)) Use list or tuple to select a subset of the array. >>> array[[0, 1], 1] Array([Binary(x1), Binary(x3)]) >>> array[(0, 1), 1] Array([Binary(x1), Binary(x3)]) Create an array from numpy array. >>> import numpy as np >>> Array(np.array([[1, 2], [3, 4]])) Array([[1, 2], [3, 4]]) Create an array from list of :class:`Array`. >>> Array([Array([1, 2]), Array([3, 4])]) Array([[1, 2], [3, 4]]) """ def __init__(self, bit_list): if isinstance(bit_list, np.ndarray): self.shape = bit_list.shape self.bit_list = bit_list.tolist() elif isinstance(bit_list, list): def get_shape(l): if isinstance(l, list) or isinstance(l, Array) or isinstance(l, np.ndarray): length = len(l) shape_set = {get_shape(e) for e in l} if len(shape_set) == 1: sub_shape = shape_set.pop() return tuple([length] + list(sub_shape)) else: raise ValueError('Cannot determine the shape of input nested list.') else: return tuple() def normalize_type(l): if isinstance(l, list): return [normalize_type(e) for e in l] elif isinstance(l, Array): return [normalize_type(e) for e in l.bit_list] elif isinstance(l, np.ndarray): return [normalize_type(e) for e in l.tolist()] else: return l self.shape = get_shape(bit_list) self.bit_list = normalize_type(bit_list) else: raise TypeError('argument should be ndarray or list') def __len__(self): return self.shape[0] def __getitem__(self, key): """Get a subset of this array. Args: key (int/tuple[int]): Index of array. 
Returns: :class:`Express`/:class:`Array`/int/float Example: >>> array = Array.create('x', (2, 3, 2), 'BINARY') >>> array Array([[[Binary(x[0][0][0]), Binary(x[0][0][1])], [Binary(x[0][1][0]), Binary(x[0][1][1])], [Binary(x[0][2][0]), Binary(x[0][2][1])]], [[Binary(x[1][0][0]), Binary(x[1][0][1])], [Binary(x[1][1][0]), Binary(x[1][1][1])], [Binary(x[1][2][0]), Binary(x[1][2][1])]]]) >>> array[0, 1, 1] Binary(x[0][1][1]) >>> array[:, :, 1] """ if isinstance(key, int): key = key, elif not isinstance(key, tuple): raise TypeError("Key should be int or tuple of int") def get_item(l, index): if len(index) > 1: current_index = index[0] if isinstance(current_index, int): return get_item(l[current_index], index[1:]) elif isinstance(current_index, list) or isinstance(current_index, tuple): return [get_item(l[i], index[1:]) for i in current_index] else: return [get_item(e, index[1:]) for e in l[current_index]] else: return l[index[0]] item = get_item(self.bit_list, key) if isinstance(item, list): return Array(item) else: return item def __repr__(self): nest_depth = len(self.shape) offset = len("Array(") def format_nested_list(nested_list, nest_count): if isinstance(nested_list[0], list): return '[{body}]'.format( body=',{line_feed}{indent}'.format( indent=' ' * (nest_count + offset), line_feed='\n' * (nest_depth - nest_count) ).join([format_nested_list(sub_list, nest_count+1) for sub_list in nested_list]) ) else: return '[%s]' % ', '.join(map(str, nested_list)) return 'Array({body})'.format(body=format_nested_list(self.bit_list, 1)) def __eq__(self, other): if not isinstance(other, Array): return False else: return self.bit_list == other.bit_list def __ne__(self, other): return not self.__eq__(other) # math operation def __neg__(self): minus_one = Array.fill(-1, self.shape) return self * minus_one def __radd__(self, other): """It is called when `other(number) + self`""" return self.__add__(other) def __add__(self, other): """It is called when `self + other(any object)`""" return self.add(other) def __rsub__(self, other): """It is called when `other(number) - self`""" return (-self).add(other) def __sub__(self, other): """It is called when `self - other(any object)`""" return self.subtract(other) def __rmul__(self, other): """It is called when `other(number) * self`""" return self.__mul__(other) def __mul__(self, other): """It is called when `self * other(any object)`""" return self.mul(other) def __div__(self, other): """It is called when `self / other(any object)`""" return self.div(other) def __rdiv__(self, other): """It is called when `other(number) / self`""" raise ValueError("Number cannot be divided by Expression.") def __truediv__(self, other): # pragma: no cover """division in Python3""" return self.__div__(other) def __rtruediv__(self, other): # pragma: no cover """It is called when `other(number) / self`""" return self.__rdiv__(other) def __matmul__(self, other): # pragma: no cover return self.matmul(other) def add(self, other): """Returns a sum of self and other. Args: other (:class:`Array`/:class:`ndarray`/int/float): Addend. 
Returns: :class:`Array` Example: >>> from pyqubo import Array, Binary >>> import numpy as np >>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), 2]]) >>> array_b = Array([[Binary('d'), 1], [Binary('f'), Binary('g')]]) >>> array_a.add(array_b) Array([[(Binary(a)+Binary(d)), (Binary(b)+Num(1))], [(Binary(c)+Binary(f)), (Binary(g)+Num(2))]]) >>> array_a + array_b Array([[(Binary(a)+Binary(d)), (Binary(b)+Num(1))], [(Binary(c)+Binary(f)), (Binary(g)+Num(2))]]) Sum of self and scalar value. >>> array_a + 5 Array([[(Binary(a)+Num(5)), (Binary(b)+Num(5))], [(Binary(c)+Num(5)), 7]]) Sum of self and numpy ndarray. >>> array_a + np.array([[1, 2], [3, 4]]) Array([[(Binary(a)+Num(1)), (Binary(b)+Num(2))], [(Binary(c)+Num(3)), 6]]) """ return self._pairwise_op_with_type_check(other, lambda x, y: x + y) def subtract(self, other): """Returns a difference between other and self. Args: other (:class:`Array`/:class:`ndarray`/int/float): Subtrahend. Returns: :class:`Array` Example: >>> from pyqubo import Array, Binary >>> import numpy as np >>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), 2]]) >>> array_b = Array([[Binary('d'), 1], [Binary('f'), Binary('g')]]) >>> array_a.subtract(array_b) Array([[(Binary(a)+(Binary(d)*Num(-1))), (Binary(b)+Num(-1))], [(Binary(c)+(Binary(f)*Num(-1))), ((Binary(g)*Num(-1))+Num(2))]]) >>> array_a - array_b Array([[(Binary(a)+(Binary(d)*Num(-1))), (Binary(b)+Num(-1))], [(Binary(c)+(Binary(f)*Num(-1))), ((Binary(g)*Num(-1))+Num(2))]]) Difference of self and scalar value. >>> array_a - 5 Array([[(Binary(a)+Num(-5)), (Binary(b)+Num(-5))], [(Binary(c)+Num(-5)), -3]]) Difference of self and numpy ndarray. >>> array_a - np.array([[1, 2], [3, 4]]) Array([[(Binary(a)+Num(-1)), (Binary(b)+Num(-2))], [(Binary(c)+Num(-3)), -2]]) """ return self._pairwise_op_with_type_check(other, lambda x, y: x - y) def mul(self, other): """Returns a multiplicity of self by other. Args: other (:class:`Array`/:class:`ndarray`/int/float): Factor. Returns: :class:`Array` Example: >>> from pyqubo import Array, Binary >>> import numpy as np >>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), 2]]) >>> array_b = Array([[Binary('d'), 1], [Binary('f'), Binary('g')]]) >>> array_a.mul(array_b) Array([[(Binary(a)*Binary(d)), (Binary(b)*Num(1))], [(Binary(c)*Binary(f)), (Binary(g)*Num(2))]]) >>> array_a * array_b Array([[(Binary(a)*Binary(d)), (Binary(b)*Num(1))], [(Binary(c)*Binary(f)), (Binary(g)*Num(2))]]) Product of self and scalar value. >>> array_a * 5 Array([[(Binary(a)*Num(5)), (Binary(b)*Num(5))], [(Binary(c)*Num(5)), 10]]) Product of self and numpy ndarray. >>> array_a * np.array([[1, 2], [3, 4]]) Array([[(Binary(a)*Num(1)), (Binary(b)*Num(2))], [(Binary(c)*Num(3)), 8]]) """ return self._pairwise_op_with_type_check(other, lambda x, y: x * y) def div(self, other): """Returns division of self by other. Args: other (int/float): Divisor. Returns: :class:`Array` Example: >>> from pyqubo import Array, Binary >>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), 2]]) >>> array_a / 5 Array([[(Binary(a)*Num(0.2)), (Binary(b)*Num(0.2))], [(Binary(c)*Num(0.2)), 0.4]]) """ if not isinstance(other, Array): return self * (other ** -1) else: raise ValueError("Expression cannot be divided by Expression.") @staticmethod @vartype_argument('vartype') def create(name, shape, vartype): """Create a new array with Spins or Binary. Args: name (str): Name of the matrix. It is used as a part of the label of variables. 
For example, if the name is 'x', the label of `(i, j)` th variable will be ``x[i][j]``. shape (int/tuple[int]): Dimensions of the array. vartype (:class:`dimod.Vartype`/str/set, optional): Variable type of the solution. Accepted input values: * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}`` * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}`` Example: >>> from pyqubo import Array >>> array = Array.create('x', shape=(2, 2), vartype='BINARY') >>> array Array([[Binary(x[0][0]), Binary(x[0][1])], [Binary(x[1][0]), Binary(x[1][1])]]) >>> array[0] Array([Binary(x[0][0]), Binary(x[0][1])]) """ if isinstance(shape, int): shape = shape, if vartype == dimod.BINARY: var_class = Binary else: var_class = Spin def var_name(_name, index): return "{name}{index_repr}".format( name=_name, index_repr=''.join(['[%d]' % i for i in index])) def create_structure(index): return {var_name(name, index): tuple([name] + index)} def generator(index): return var_class(var_name(name, index), create_structure(index)) return Array._create_with_generator(shape, generator) @staticmethod def fill(obj, shape): """Create a new array with the given shape, all filled with the given object. Args: obj (int/float/:class:`Express`): The object with which a new array is filled. shape (tuple[int]): Shape of the array. Returns: :class:`Array`: Created array. Example: >>> from pyqubo import Array, Binary >>> Array.fill(Binary('a'), shape=(2, 3)) Array([[Binary(a), Binary(a), Binary(a)], [Binary(a), Binary(a), Binary(a)]]) """ return Array._create_with_generator(shape, lambda _: obj) @staticmethod def _create_with_generator(shape, generator): """Returns an array with objects which `generator` created. Args: shape (tuple[int]): Shape of the array. generator (list[int] =>:class:`Express`): Function to generate :class:`Express`:. Type of the argument of the generator is ``list[int]``. Returns: :class:`Array`: Created array. """ _shape_list = list(shape) def create_internal(shape_list, index): if len(shape_list) > 1: length = shape_list[0] return [create_internal(shape_list[1:], index + [i]) for i in range(length)] else: length = shape_list[0] return [generator(index+[i]) for i in range(length)] return Array(create_internal(_shape_list, [])) def _pairwise_op_with_type_check(self, other, operation): """Pairwise operation with type check. Args: other (:class:`Array`/:class:`ndarray`/int/float): The other object in operation. operation (:class:`Express`, :class:`Express` => :class:`Express`): Operation. Returns: :class:`Array` """ if isinstance(other, np.ndarray): other = Array(other) elif isinstance(other, int) or isinstance(other, float) or isinstance(other, Express): other = Array.fill(other, self.shape) elif not isinstance(other, Array): raise TypeError('Operation of Array cannot be done with type:{type}' .format(type=type(other))) return self._pairwise_op(other, operation) def _pairwise_op(self, other, operation): """Pairwise operation Args: other (:class:`Array`): The other object in operation. 
operation (:class:`Express`, :class:`Express` => :class:`Express`): Operation Returns: :class:`Array` """ if not isinstance(other, Array): # pragma: no cover raise TypeError('Type of `other` is not a `Array` instance.') elif not self.shape == other.shape: raise ValueError('Shape of other is not same as that of self.') else: def operate(l1, l2): if isinstance(l1, list): return [operate(e1, e2) for e1, e2 in zip(l1, l2)] else: return operation(l1, l2) return Array(operate(self.bit_list, other.bit_list)) @property def T(self): """Returns a transposed array. Example: >>> from pyqubo import Array >>> array = Array.create('x', shape=(2, 3), vartype='BINARY') >>> array Array([[Binary(x[0][0]), Binary(x[0][1]), Binary(x[0][2])], [Binary(x[1][0]), Binary(x[1][1]), Binary(x[1][2])]]) >>> array.T Array([[Binary(x[0][0]), Binary(x[1][0])], [Binary(x[0][1]), Binary(x[1][1])], [Binary(x[0][2]), Binary(x[1][2])]]) """ def generator(index): return self[tuple(index[::-1])] return Array._create_with_generator(self.shape[::-1], generator) def dot(self, other): """Returns a dot product of two arrays. Args: other (:class:`Array`): Array. Returns: :class:`Express`/:class:`Array` Example: Dot calculation falls into four patterns. 1. If both `self` and `other` are 1-D arrays, it is inner product of vectors. >>> from pyqubo import Array, Binary >>> array_a = Array([Binary('a'), Binary('b')]) >>> array_b = Array([Binary('c'), Binary('d')]) >>> array_a.dot(array_b) ((Binary(a)*Binary(c))+(Binary(b)*Binary(d))) 2. If `self` is an N-D array and `other` is a 1-D array,\ it is a sum product over the last axis of `self` and `other`. >>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), Binary('d')]]) >>> array_b = Array([Binary('e'), Binary('f')]) >>> array_a.dot(array_b) Array([((Binary(a)*Binary(e))+(Binary(b)*Binary(f))), \ ((Binary(c)*Binary(e))+(Binary(d)*Binary(f)))]) 3. If both `self` and `other` are 2-D arrays, it is matrix multiplication. >>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), Binary('d')]]) >>> array_b = Array([[Binary('e'), Binary('f')], [Binary('g'), Binary('h')]]) >>> array_a.dot(array_b) Array([[((Binary(a)*Binary(e))+(Binary(b)*Binary(g))), \ ((Binary(a)*Binary(f))+(Binary(b)*Binary(h)))], [((Binary(c)*Binary(e))+(Binary(d)*Binary(g))), \ ((Binary(c)*Binary(f))+(Binary(d)*Binary(h)))]]) 4. If `self` is an N-D array and `other` is an M-D array (where N, M>=2),\ it is a sum product over the last axis of `self` and\ the second-to-last axis of `other`. If N = M = 3,\ (i, j, k, m) element of a dot product of `self` and `other` is: .. code-block:: python dot(self, other)[i,j,k,m] = sum(self[i,j,:] * other[k,:,m]) >>> array_a = Array.create('a', shape=(3, 2, 4), vartype='BINARY') >>> array_a.shape (3, 2, 4) >>> array_b = Array.create('b', shape=(5, 4, 3), vartype='BINARY') >>> array_b.shape (5, 4, 3) >>> i, j, k, m = (1, 1, 3, 2) >>> array_a.dot(array_b)[i, j, k, m] == sum(array_a[i, j, :] * array_b[k, :, m]) True Dot product with list. 
>>> array_a = Array([Binary('a'), Binary('b')]) >>> array_b = [3, 4] >>> array_a.dot(array_b) ((Binary(a)*Num(3))+(Binary(b)*Num(4))) """ if isinstance(other, np.ndarray) or isinstance(other, list): other = Array(other) if not isinstance(other, Array): raise TypeError("Type of argument should be Array") # pattern 1 (see docstring) if len(self.shape) == 1 and len(other.shape) == 1 and self.shape[0] == other.shape[0]: return sum(self.mul(other)) # pattern 2 elif len(self.shape) == 2 and len(other.shape) == 1: return Array([sum(v * other) for v in self]) # pattern 3 and 4 else: return self._dot_matrix(other) def _dot_matrix(self, other): """Returns a dot product of N-D array self and M-D array other (where N, M>=2). """ assert isinstance(other, Array), "Type should be Array, not {type}".format(type=type(other)) assert self.shape[-1] == other.shape[-2],\ "self.shape[-1] should be equal other.shape[-2].\n" +\ "For more details, see https://pyqubo.readthedocs.io/en/latest/reference/array.html" vector_indices = slice(0, self.shape[-1], None) new_shape = self.shape[:-1] + other.shape[:-2] + (other.shape[-1],) def generator(index): half = len(self.shape) - 1 index_self = tuple(index[:half]) + (vector_indices,) index_other = tuple(index[half:-1]) + (vector_indices,) + (index[-1],) vector_self = self[index_self] vector_other = other[index_other] return sum(vector_self * vector_other) return Array._create_with_generator(new_shape, generator) def matmul(self, other): """Returns a matrix product of two arrays. Note: You can use operator symbol '@' instead of :obj:`matmul()` in Python 3.5 or later version. >>> from pyqubo import Array >>> array_a = Array.create('a', shape=(2, 4), vartype='BINARY') >>> array_b = Array.create('b', shape=(4, 3), vartype='BINARY') >>> array_a @ array_b == array_a.matmul(array_b) True Args: other (:class:`Array`/:class:`numpy.ndarray`/list): Returns: :class:`Array`/:class:`Express` Example: Matrix product of two arrays falls into 3 patterns. 1. If either of the arguments is 1-D array, it is treated as a matrix where one is added to its dimension. >>> from pyqubo import Array, Binary >>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), Binary('d')]]) >>> array_b = Array([Binary('e'), Binary('f')]) >>> array_a.matmul(array_b) Array([((Binary(a)*Binary(e))+(Binary(b)*Binary(f))), \ ((Binary(c)*Binary(e))+(Binary(d)*Binary(f)))]) 2. If both arguments are 2-D array, conventional matrix product is calculated. >>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), Binary('d')]]) >>> array_b = Array([[Binary('e'), Binary('f')], [Binary('g'), Binary('h')]]) >>> array_a.matmul(array_b) Array([[((Binary(a)*Binary(e))+(Binary(b)*Binary(g))), \ ((Binary(a)*Binary(f))+(Binary(b)*Binary(h)))], [((Binary(c)*Binary(e))+(Binary(d)*Binary(g))), \ ((Binary(c)*Binary(f))+(Binary(d)*Binary(h)))]]) 3. If either argument is N-D (where N > 2), it is treated as an array whose element is a 2-D matrix of last two indices. In this example, `array_a` is treated as if it is a vector whose elements are two matrices of shape (2, 3). 
>>> array_a = Array.create('a', shape=(2, 2, 3), vartype='BINARY') >>> array_b = Array.create('b', shape=(3, 2), vartype='BINARY') >>> (array_a @ array_b)[0] == array_a[0].matmul(array_b) True """ if isinstance(other, np.ndarray) or isinstance(other, list): other = Array(other) assert isinstance(other, Array), "Type should be Array, not {type}".format(type=type(other)) # pattern 1 (see docstring) if len(self.shape) == 1 or len(other.shape) == 1: return self.dot(other) # pattern 2 and 3 else: return self._matmul_matrix(other) def _matmul_matrix(self, other): assert isinstance(other, Array), "Type should be Array, not {type}".format(type=type(other)) assert len(self.shape) >= 2 and len(other.shape) >= 2, "Shape should be greater than 2" assert self.shape[-1] == other.shape[-2], \ "self.shape[-1] should be equal other.shape[-2].\n" + \ "For more details, see https://pyqubo.readthedocs.io/en/latest/reference/array.html" self_shape_len = len(self.shape) other_shape_len = len(other.shape) common_len = min(self_shape_len, other_shape_len) for s1, s2 in zip(self.shape[-common_len:-2], other.shape[-common_len:-2]): assert s1 == s2, "Shape doesn't match." longer_shape = self.shape if self_shape_len > other_shape_len else other.shape new_shape = longer_shape[:-2] + (self.shape[-2], other.shape[-1]) def generator(index): mat_index_self = tuple(index[-self_shape_len:][:-2]) mat_index_other = tuple(index[-other_shape_len:][:-2]) mat_self = self[mat_index_self] if mat_index_self != () else self mat_other = other[mat_index_other] if mat_index_other != () else other j = index[-1] i = index[-2] return mat_self[i, :].dot(mat_other[:, j]) return Array._create_with_generator(new_shape, generator) @staticmethod def _calc_steps(shape): """Returns steps of shape. Step is used to create an 1-dim index from n-dim index like >>> steps = Array._calc_steps(shape) >>> one_dim_index = sum(step * i for step, i in zip(steps, n_dim_index)) """ steps = [] tmp_d = 1 for d in shape[::-1]: steps.append(tmp_d) tmp_d *= d steps = steps[::-1] return steps def reshape(self, new_shape): """Returns a reshaped array. Args: new_shape (tuple[int]): New shape. Example: >>> from pyqubo import Array >>> array = Array.create('x', shape=(2, 3), vartype='BINARY') >>> array Array([[Binary(x[0][0]), Binary(x[0][1]), Binary(x[0][2])], [Binary(x[1][0]), Binary(x[1][1]), Binary(x[1][2])]]) >>> array.reshape((3, 2, 1)) Array([[[Binary(x[0][0])], [Binary(x[0][1])]],\ [[Binary(x[0][2])], [Binary(x[1][0])]],\ [[Binary(x[1][1])], [Binary(x[1][2])]]]) """ assert reduce(mul, self.shape) == reduce(mul, new_shape),\ "cannot reshape array of size {p} into shape {new_shape}".format( p=reduce(mul, self.shape), new_shape=new_shape) def calc_one_dim_array(nested_list): if isinstance(nested_list, list): return reduce(add, [calc_one_dim_array(e) for e in nested_list]) else: return [nested_list] # create an 1-dim array from the n-dim array one_dim_array = calc_one_dim_array(self.bit_list) new_steps = Array._calc_steps(new_shape) def generator(index): # create an index for 1-dim array from the given index one_dim_index = sum(step * i for step, i in zip(new_steps, index)) return one_dim_array[one_dim_index] return Array._create_with_generator(new_shape, generator)
36.978723
100
0.504874
from .core import Spin, Binary, Express import dimod from dimod.decorators import vartype_argument import numpy as np from operator import mul, add from six.moves import reduce class Array: def __init__(self, bit_list): if isinstance(bit_list, np.ndarray): self.shape = bit_list.shape self.bit_list = bit_list.tolist() elif isinstance(bit_list, list): def get_shape(l): if isinstance(l, list) or isinstance(l, Array) or isinstance(l, np.ndarray): length = len(l) shape_set = {get_shape(e) for e in l} if len(shape_set) == 1: sub_shape = shape_set.pop() return tuple([length] + list(sub_shape)) else: raise ValueError('Cannot determine the shape of input nested list.') else: return tuple() def normalize_type(l): if isinstance(l, list): return [normalize_type(e) for e in l] elif isinstance(l, Array): return [normalize_type(e) for e in l.bit_list] elif isinstance(l, np.ndarray): return [normalize_type(e) for e in l.tolist()] else: return l self.shape = get_shape(bit_list) self.bit_list = normalize_type(bit_list) else: raise TypeError('argument should be ndarray or list') def __len__(self): return self.shape[0] def __getitem__(self, key): if isinstance(key, int): key = key, elif not isinstance(key, tuple): raise TypeError("Key should be int or tuple of int") def get_item(l, index): if len(index) > 1: current_index = index[0] if isinstance(current_index, int): return get_item(l[current_index], index[1:]) elif isinstance(current_index, list) or isinstance(current_index, tuple): return [get_item(l[i], index[1:]) for i in current_index] else: return [get_item(e, index[1:]) for e in l[current_index]] else: return l[index[0]] item = get_item(self.bit_list, key) if isinstance(item, list): return Array(item) else: return item def __repr__(self): nest_depth = len(self.shape) offset = len("Array(") def format_nested_list(nested_list, nest_count): if isinstance(nested_list[0], list): return '[{body}]'.format( body=',{line_feed}{indent}'.format( indent=' ' * (nest_count + offset), line_feed='\n' * (nest_depth - nest_count) ).join([format_nested_list(sub_list, nest_count+1) for sub_list in nested_list]) ) else: return '[%s]' % ', '.join(map(str, nested_list)) return 'Array({body})'.format(body=format_nested_list(self.bit_list, 1)) def __eq__(self, other): if not isinstance(other, Array): return False else: return self.bit_list == other.bit_list def __ne__(self, other): return not self.__eq__(other) def __neg__(self): minus_one = Array.fill(-1, self.shape) return self * minus_one def __radd__(self, other): return self.__add__(other) def __add__(self, other): return self.add(other) def __rsub__(self, other): return (-self).add(other) def __sub__(self, other): return self.subtract(other) def __rmul__(self, other): return self.__mul__(other) def __mul__(self, other): return self.mul(other) def __div__(self, other): return self.div(other) def __rdiv__(self, other): raise ValueError("Number cannot be divided by Expression.") def __truediv__(self, other): return self.__div__(other) def __rtruediv__(self, other): return self.__rdiv__(other) def __matmul__(self, other): return self.matmul(other) def add(self, other): return self._pairwise_op_with_type_check(other, lambda x, y: x + y) def subtract(self, other): return self._pairwise_op_with_type_check(other, lambda x, y: x - y) def mul(self, other): return self._pairwise_op_with_type_check(other, lambda x, y: x * y) def div(self, other): if not isinstance(other, Array): return self * (other ** -1) else: raise ValueError("Expression cannot be divided by Expression.") 
@staticmethod @vartype_argument('vartype') def create(name, shape, vartype): if isinstance(shape, int): shape = shape, if vartype == dimod.BINARY: var_class = Binary else: var_class = Spin def var_name(_name, index): return "{name}{index_repr}".format( name=_name, index_repr=''.join(['[%d]' % i for i in index])) def create_structure(index): return {var_name(name, index): tuple([name] + index)} def generator(index): return var_class(var_name(name, index), create_structure(index)) return Array._create_with_generator(shape, generator) @staticmethod def fill(obj, shape): return Array._create_with_generator(shape, lambda _: obj) @staticmethod def _create_with_generator(shape, generator): _shape_list = list(shape) def create_internal(shape_list, index): if len(shape_list) > 1: length = shape_list[0] return [create_internal(shape_list[1:], index + [i]) for i in range(length)] else: length = shape_list[0] return [generator(index+[i]) for i in range(length)] return Array(create_internal(_shape_list, [])) def _pairwise_op_with_type_check(self, other, operation): if isinstance(other, np.ndarray): other = Array(other) elif isinstance(other, int) or isinstance(other, float) or isinstance(other, Express): other = Array.fill(other, self.shape) elif not isinstance(other, Array): raise TypeError('Operation of Array cannot be done with type:{type}' .format(type=type(other))) return self._pairwise_op(other, operation) def _pairwise_op(self, other, operation): if not isinstance(other, Array): raise TypeError('Type of `other` is not a `Array` instance.') elif not self.shape == other.shape: raise ValueError('Shape of other is not same as that of self.') else: def operate(l1, l2): if isinstance(l1, list): return [operate(e1, e2) for e1, e2 in zip(l1, l2)] else: return operation(l1, l2) return Array(operate(self.bit_list, other.bit_list)) @property def T(self): def generator(index): return self[tuple(index[::-1])] return Array._create_with_generator(self.shape[::-1], generator) def dot(self, other): if isinstance(other, np.ndarray) or isinstance(other, list): other = Array(other) if not isinstance(other, Array): raise TypeError("Type of argument should be Array") if len(self.shape) == 1 and len(other.shape) == 1 and self.shape[0] == other.shape[0]: return sum(self.mul(other)) elif len(self.shape) == 2 and len(other.shape) == 1: return Array([sum(v * other) for v in self]) else: return self._dot_matrix(other) def _dot_matrix(self, other): assert isinstance(other, Array), "Type should be Array, not {type}".format(type=type(other)) assert self.shape[-1] == other.shape[-2],\ "self.shape[-1] should be equal other.shape[-2].\n" +\ "For more details, see https://pyqubo.readthedocs.io/en/latest/reference/array.html" vector_indices = slice(0, self.shape[-1], None) new_shape = self.shape[:-1] + other.shape[:-2] + (other.shape[-1],) def generator(index): half = len(self.shape) - 1 index_self = tuple(index[:half]) + (vector_indices,) index_other = tuple(index[half:-1]) + (vector_indices,) + (index[-1],) vector_self = self[index_self] vector_other = other[index_other] return sum(vector_self * vector_other) return Array._create_with_generator(new_shape, generator) def matmul(self, other): if isinstance(other, np.ndarray) or isinstance(other, list): other = Array(other) assert isinstance(other, Array), "Type should be Array, not {type}".format(type=type(other)) if len(self.shape) == 1 or len(other.shape) == 1: return self.dot(other) else: return self._matmul_matrix(other) def _matmul_matrix(self, other): assert 
isinstance(other, Array), "Type should be Array, not {type}".format(type=type(other)) assert len(self.shape) >= 2 and len(other.shape) >= 2, "Shape should be greater than 2" assert self.shape[-1] == other.shape[-2], \ "self.shape[-1] should be equal other.shape[-2].\n" + \ "For more details, see https://pyqubo.readthedocs.io/en/latest/reference/array.html" self_shape_len = len(self.shape) other_shape_len = len(other.shape) common_len = min(self_shape_len, other_shape_len) for s1, s2 in zip(self.shape[-common_len:-2], other.shape[-common_len:-2]): assert s1 == s2, "Shape doesn't match." longer_shape = self.shape if self_shape_len > other_shape_len else other.shape new_shape = longer_shape[:-2] + (self.shape[-2], other.shape[-1]) def generator(index): mat_index_self = tuple(index[-self_shape_len:][:-2]) mat_index_other = tuple(index[-other_shape_len:][:-2]) mat_self = self[mat_index_self] if mat_index_self != () else self mat_other = other[mat_index_other] if mat_index_other != () else other j = index[-1] i = index[-2] return mat_self[i, :].dot(mat_other[:, j]) return Array._create_with_generator(new_shape, generator) @staticmethod def _calc_steps(shape): steps = [] tmp_d = 1 for d in shape[::-1]: steps.append(tmp_d) tmp_d *= d steps = steps[::-1] return steps def reshape(self, new_shape): assert reduce(mul, self.shape) == reduce(mul, new_shape),\ "cannot reshape array of size {p} into shape {new_shape}".format( p=reduce(mul, self.shape), new_shape=new_shape) def calc_one_dim_array(nested_list): if isinstance(nested_list, list): return reduce(add, [calc_one_dim_array(e) for e in nested_list]) else: return [nested_list] # create an 1-dim array from the n-dim array one_dim_array = calc_one_dim_array(self.bit_list) new_steps = Array._calc_steps(new_shape) def generator(index): # create an index for 1-dim array from the given index one_dim_index = sum(step * i for step, i in zip(new_steps, index)) return one_dim_array[one_dim_index] return Array._create_with_generator(new_shape, generator)
true
true
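The array.py record above documents pyqubo's Array API through its docstrings: Array.create, tuple/slice indexing, transpose, dot, matmul, and reshape. The short sketch below strings those documented calls together; it is illustrative only, the variable names (x, row_sum, gram, flat) are made up, and it assumes a pyqubo version that still ships this pure-Python Array class.

# Sketch using only calls documented in the docstrings above
# (Array.create, slicing, .T, .dot, .reshape); names are illustrative.
from pyqubo import Array

x = Array.create('x', shape=(2, 3), vartype='BINARY')  # 2x3 array of Binary variables

row_sum = sum(x[0, :])   # Express: x[0][0] + x[0][1] + x[0][2]
gram = x.dot(x.T)        # 2x2 Array of pairwise inner products (pattern 3 of dot)
flat = x.reshape((3, 2)) # same six variables, new shape

print(row_sum)
print(gram[0, 1])
print(flat.shape)        # (3, 2)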
f7248e4c92268c1a8e4f9a3e78264a4a28b054ff
533
py
Python
scripts/flash/plot_hitscores.py
JunCEEE/hummingbird
0b1bdf5023b92090f31d9bc857e0854a805cf2cd
[ "BSD-2-Clause" ]
14
2016-02-18T23:10:12.000Z
2021-07-30T09:19:56.000Z
scripts/flash/plot_hitscores.py
JunCEEE/hummingbird
0b1bdf5023b92090f31d9bc857e0854a805cf2cd
[ "BSD-2-Clause" ]
66
2015-11-18T15:39:45.000Z
2015-12-06T16:06:20.000Z
scripts/flash/plot_hitscores.py
JunCEEE/hummingbird
0b1bdf5023b92090f31d9bc857e0854a805cf2cd
[ "BSD-2-Clause" ]
13
2016-07-07T13:15:52.000Z
2021-11-10T11:56:13.000Z
#!/usr/bin/env python
import h5py
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys

runnr = int(sys.argv[1])
filename = '/asap3/flash/gpfs/bl1/2017/data/11001733/processed/hummingbird/r%04d_ol1.h5' %runnr

with h5py.File(filename, 'r') as f:
    hitscore = f['entry_1/result_1/hitscore_litpixel'][:]

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(hitscore, 'k.')
#ax.axhline(int(sys.argv[2]))
fig.savefig('../plots/r%04d_hitscore.png' %runnr, dpi=100, bbox_inches='tight')
28.052632
95
0.729831
import h5py
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys

runnr = int(sys.argv[1])
filename = '/asap3/flash/gpfs/bl1/2017/data/11001733/processed/hummingbird/r%04d_ol1.h5' %runnr

with h5py.File(filename, 'r') as f:
    hitscore = f['entry_1/result_1/hitscore_litpixel'][:]

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(hitscore, 'k.')
fig.savefig('../plots/r%04d_hitscore.png' %runnr, dpi=100, bbox_inches='tight')
true
true
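The plot_hitscores.py record above is a small headless-plotting script: it reads a hit-score dataset from an HDF5 file with h5py and saves a scatter plot through matplotlib's Agg backend. The variant below keeps the same h5py/matplotlib calls but takes the threshold from the command line instead of leaving it commented out; the file path and dataset name are copied from the record and are specific to that beamtime, so treat them as placeholders anywhere else.

# Hedged variant of the record's script: identical reading/plotting calls,
# optional hit threshold passed as a second command-line argument.
import sys
import h5py
import matplotlib
matplotlib.use('Agg')  # headless backend, as in the record
import matplotlib.pyplot as plt

runnr = int(sys.argv[1])
threshold = float(sys.argv[2]) if len(sys.argv) > 2 else None
filename = '/asap3/flash/gpfs/bl1/2017/data/11001733/processed/hummingbird/r%04d_ol1.h5' % runnr

with h5py.File(filename, 'r') as f:
    hitscore = f['entry_1/result_1/hitscore_litpixel'][:]

fig, ax = plt.subplots()
ax.plot(hitscore, 'k.')
if threshold is not None:
    ax.axhline(threshold, color='r')  # draw the hit threshold, if given
fig.savefig('../plots/r%04d_hitscore.png' % runnr, dpi=100, bbox_inches='tight')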
f7248ee621042e30291d461ffdf3dcab8f265bba
106,231
py
Python
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/aio/operations/_virtual_machines_operations.py
vincenttran-msft/azure-sdk-for-python
348b56f9f03eeb3f7b502eed51daf494ffff874d
[ "MIT" ]
1
2021-09-07T18:39:05.000Z
2021-09-07T18:39:05.000Z
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/aio/operations/_virtual_machines_operations.py
vincenttran-msft/azure-sdk-for-python
348b56f9f03eeb3f7b502eed51daf494ffff874d
[ "MIT" ]
null
null
null
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/aio/operations/_virtual_machines_operations.py
vincenttran-msft/azure-sdk-for-python
348b56f9f03eeb3f7b502eed51daf494ffff874d
[ "MIT" ]
1
2022-03-04T06:21:56.000Z
2022-03-04T06:21:56.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models from ..._vendor import _convert_request from ...operations._virtual_machines_operations import build_assess_patches_request_initial, build_capture_request_initial, build_convert_to_managed_disks_request_initial, build_create_or_update_request_initial, build_deallocate_request_initial, build_delete_request_initial, build_generalize_request, build_get_request, build_install_patches_request_initial, build_instance_view_request, build_list_all_request, build_list_available_sizes_request, build_list_by_location_request, build_list_request, build_perform_maintenance_request_initial, build_power_off_request_initial, build_reapply_request_initial, build_redeploy_request_initial, build_reimage_request_initial, build_restart_request_initial, build_retrieve_boot_diagnostics_data_request, build_run_command_request_initial, build_simulate_eviction_request, build_start_request_initial, build_update_request_initial T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class VirtualMachinesOperations: """VirtualMachinesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.compute.v2021_04_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace def list_by_location( self, location: str, **kwargs: Any ) -> AsyncIterable["_models.VirtualMachineListResult"]: """Gets all the virtual machines under the specified subscription for the specified location. :param location: The location for which virtual machines under the subscription are queried. 
:type location: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either VirtualMachineListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_by_location_request( location=location, subscription_id=self._config.subscription_id, template_url=self.list_by_location.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_by_location_request( location=location, subscription_id=self._config.subscription_id, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("VirtualMachineListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachines'} # type: ignore async def _capture_initial( self, resource_group_name: str, vm_name: str, parameters: "_models.VirtualMachineCaptureParameters", **kwargs: Any ) -> Optional["_models.VirtualMachineCaptureResult"]: cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualMachineCaptureResult"]] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] _json = self._serialize.body(parameters, 'VirtualMachineCaptureParameters') request = build_capture_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._capture_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = None if response.status_code == 200: deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _capture_initial.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'} # type: ignore @distributed_trace_async async def begin_capture( self, resource_group_name: str, vm_name: str, parameters: "_models.VirtualMachineCaptureParameters", **kwargs: Any ) -> AsyncLROPoller["_models.VirtualMachineCaptureResult"]: """Captures the VM by copying virtual hard disks of the VM and outputs a template that can be used to create similar VMs. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :param parameters: Parameters supplied to the Capture Virtual Machine operation. :type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineCaptureParameters :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either VirtualMachineCaptureResult or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineCaptureResult] :raises: ~azure.core.exceptions.HttpResponseError """ content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineCaptureResult"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._capture_initial( resource_group_name=resource_group_name, vm_name=vm_name, parameters=parameters, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'} # type: ignore async def _create_or_update_initial( self, resource_group_name: str, vm_name: str, parameters: "_models.VirtualMachine", **kwargs: Any ) -> "_models.VirtualMachine": cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"] error_map = { 401: ClientAuthenticationError, 404: 
ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] _json = self._serialize.body(parameters, 'VirtualMachine') request = build_create_or_update_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._create_or_update_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('VirtualMachine', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('VirtualMachine', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore @distributed_trace_async async def begin_create_or_update( self, resource_group_name: str, vm_name: str, parameters: "_models.VirtualMachine", **kwargs: Any ) -> AsyncLROPoller["_models.VirtualMachine"]: """The operation to create or update a virtual machine. Please note some properties can be set only during virtual machine creation. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :param parameters: Parameters supplied to the Create Virtual Machine operation. :type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachine :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either VirtualMachine or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachine] :raises: ~azure.core.exceptions.HttpResponseError """ content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, vm_name=vm_name, parameters=parameters, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('VirtualMachine', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore async def _update_initial( self, resource_group_name: str, vm_name: str, parameters: "_models.VirtualMachineUpdate", **kwargs: Any ) -> "_models.VirtualMachine": cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] _json = self._serialize.body(parameters, 'VirtualMachineUpdate') request = build_update_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._update_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('VirtualMachine', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore @distributed_trace_async async def begin_update( self, resource_group_name: str, vm_name: str, parameters: "_models.VirtualMachineUpdate", **kwargs: Any ) -> AsyncLROPoller["_models.VirtualMachine"]: """The operation to update a virtual machine. 
:param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :param parameters: Parameters supplied to the Update Virtual Machine operation. :type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineUpdate :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either VirtualMachine or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachine] :raises: ~azure.core.exceptions.HttpResponseError """ content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._update_initial( resource_group_name=resource_group_name, vm_name=vm_name, parameters=parameters, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('VirtualMachine', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore async def _delete_initial( self, resource_group_name: str, vm_name: str, force_deletion: Optional[bool] = None, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_delete_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, force_deletion=force_deletion, template_url=self._delete_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, 
error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore @distributed_trace_async async def begin_delete( self, resource_group_name: str, vm_name: str, force_deletion: Optional[bool] = None, **kwargs: Any ) -> AsyncLROPoller[None]: """The operation to delete a virtual machine. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :param force_deletion: Optional parameter to force delete virtual machines. :type force_deletion: bool :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, vm_name=vm_name, force_deletion=force_deletion, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore @distributed_trace_async async def get( self, resource_group_name: str, vm_name: str, expand: Optional[Union[str, "_models.InstanceViewTypes"]] = None, **kwargs: Any ) -> "_models.VirtualMachine": """Retrieves information about the model view or the instance view of a virtual machine. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :param expand: The expand expression to apply on the operation. 'InstanceView' retrieves a snapshot of the runtime properties of the virtual machine that is managed by the platform and can change outside of control plane operations. 
'UserData' retrieves the UserData property as part of the VM model view that was provided by the user during the VM Create/Update operation. :type expand: str or ~azure.mgmt.compute.v2021_04_01.models.InstanceViewTypes :keyword callable cls: A custom type or function that will be passed the direct response :return: VirtualMachine, or the result of cls(response) :rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachine :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, expand=expand, template_url=self.get.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('VirtualMachine', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore @distributed_trace_async async def instance_view( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> "_models.VirtualMachineInstanceView": """Retrieves information about the run-time state of a virtual machine. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. 
:type vm_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: VirtualMachineInstanceView, or the result of cls(response) :rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineInstanceView :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineInstanceView"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_instance_view_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self.instance_view.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('VirtualMachineInstanceView', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized instance_view.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/instanceView'} # type: ignore async def _convert_to_managed_disks_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_convert_to_managed_disks_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._convert_to_managed_disks_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _convert_to_managed_disks_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks'} # type: ignore @distributed_trace_async async def begin_convert_to_managed_disks( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Converts virtual machine disks from blob-based to managed disks. Virtual machine must be stop-deallocated before invoking this operation. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. 
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._convert_to_managed_disks_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_convert_to_managed_disks.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks'} # type: ignore async def _deallocate_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_deallocate_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._deallocate_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _deallocate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'} # type: ignore @distributed_trace_async async def begin_deallocate( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Shuts down the virtual machine and releases the compute resources. You are not billed for the compute resources that this virtual machine uses. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. 
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._deallocate_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_deallocate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'} # type: ignore @distributed_trace_async async def generalize( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: """Sets the OS state of the virtual machine to generalized. It is recommended to sysprep the virtual machine before performing this operation. :code:`<br>`For Windows, please refer to `Create a managed image of a generalized VM in Azure <https://docs.microsoft.com/azure/virtual-machines/windows/capture-image-resource>`_.:code:`<br>`For Linux, please refer to `How to create an image of a virtual machine or VHD <https://docs.microsoft.com/azure/virtual-machines/linux/capture-image>`_. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. 
:type vm_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_generalize_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self.generalize.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) generalize.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize'} # type: ignore @distributed_trace def list( self, resource_group_name: str, **kwargs: Any ) -> AsyncIterable["_models.VirtualMachineListResult"]: """Lists all of the virtual machines in the specified resource group. Use the nextLink property in the response to get the next page of virtual machines. :param resource_group_name: The name of the resource group. :type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either VirtualMachineListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_request( resource_group_name=resource_group_name, subscription_id=self._config.subscription_id, template_url=self.list.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_request( resource_group_name=resource_group_name, subscription_id=self._config.subscription_id, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("VirtualMachineListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines'} # type: ignore @distributed_trace def list_all( self, status_only: Optional[str] = None, **kwargs: Any ) -> AsyncIterable["_models.VirtualMachineListResult"]: """Lists all of the virtual machines in the specified subscription. Use the nextLink property in the response to get the next page of virtual machines. :param status_only: statusOnly=true enables fetching run time status of all Virtual Machines in the subscription. :type status_only: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either VirtualMachineListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_all_request( subscription_id=self._config.subscription_id, status_only=status_only, template_url=self.list_all.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_all_request( subscription_id=self._config.subscription_id, status_only=status_only, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("VirtualMachineListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines'} # type: ignore @distributed_trace def list_available_sizes( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncIterable["_models.VirtualMachineSizeListResult"]: """Lists all available virtual machine sizes to which the specified virtual machine can be resized. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. 
:type vm_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either VirtualMachineSizeListResult or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineSizeListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineSizeListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_available_sizes_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self.list_available_sizes.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_available_sizes_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_available_sizes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes'} # type: ignore async def _power_off_initial( self, resource_group_name: str, vm_name: str, skip_shutdown: Optional[bool] = False, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_power_off_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, skip_shutdown=skip_shutdown, template_url=self._power_off_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _power_off_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'} # type: ignore @distributed_trace_async async def begin_power_off( self, resource_group_name: str, vm_name: str, skip_shutdown: Optional[bool] = False, **kwargs: Any ) -> 
AsyncLROPoller[None]: """The operation to power off (stop) a virtual machine. The virtual machine can be restarted with the same provisioned resources. You are still charged for this virtual machine. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :param skip_shutdown: The parameter to request non-graceful VM shutdown. True value for this flag indicates non-graceful shutdown whereas false indicates otherwise. Default value for this flag is false if not specified. :type skip_shutdown: bool :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._power_off_initial( resource_group_name=resource_group_name, vm_name=vm_name, skip_shutdown=skip_shutdown, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_power_off.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'} # type: ignore async def _reapply_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_reapply_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._reapply_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return 
cls(pipeline_response, None, {}) _reapply_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply'} # type: ignore @distributed_trace_async async def begin_reapply( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """The operation to reapply a virtual machine's state. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._reapply_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_reapply.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply'} # type: ignore async def _restart_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_restart_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._restart_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _restart_initial.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'} # type: ignore @distributed_trace_async async def begin_restart( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """The operation to restart a virtual machine. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._restart_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'} # type: ignore async def _start_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_start_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._start_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _start_initial.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'} # type: ignore @distributed_trace_async async def begin_start( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """The operation to start a virtual machine. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._start_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'} # type: ignore async def _redeploy_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_redeploy_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._redeploy_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _redeploy_initial.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'} # type: ignore @distributed_trace_async async def begin_redeploy( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """Shuts down the virtual machine, moves it to a new node, and powers it back on. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._redeploy_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_redeploy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'} # type: ignore async def _reimage_initial( self, resource_group_name: str, vm_name: str, parameters: Optional["_models.VirtualMachineReimageParameters"] = None, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] if parameters is not None: _json = self._serialize.body(parameters, 'VirtualMachineReimageParameters') else: _json = None request = build_reimage_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._reimage_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if 
response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _reimage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage'} # type: ignore @distributed_trace_async async def begin_reimage( self, resource_group_name: str, vm_name: str, parameters: Optional["_models.VirtualMachineReimageParameters"] = None, **kwargs: Any ) -> AsyncLROPoller[None]: """Reimages the virtual machine which has an ephemeral OS disk back to its initial state. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :param parameters: Parameters supplied to the Reimage Virtual Machine operation. :type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineReimageParameters :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._reimage_initial( resource_group_name=resource_group_name, vm_name=vm_name, parameters=parameters, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage'} # type: ignore @distributed_trace_async async def retrieve_boot_diagnostics_data( self, resource_group_name: str, vm_name: str, sas_uri_expiration_time_in_minutes: Optional[int] = None, **kwargs: Any ) -> "_models.RetrieveBootDiagnosticsDataResult": """The operation to retrieve SAS URIs for a virtual machine's boot diagnostic logs. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :param sas_uri_expiration_time_in_minutes: Expiration duration in minutes for the SAS URIs with a value between 1 to 1440 minutes. :code:`<br>`:code:`<br>`NOTE: If not specified, SAS URIs will be generated with a default expiration duration of 120 minutes. :type sas_uri_expiration_time_in_minutes: int :keyword callable cls: A custom type or function that will be passed the direct response :return: RetrieveBootDiagnosticsDataResult, or the result of cls(response) :rtype: ~azure.mgmt.compute.v2021_04_01.models.RetrieveBootDiagnosticsDataResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.RetrieveBootDiagnosticsDataResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_retrieve_boot_diagnostics_data_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, sas_uri_expiration_time_in_minutes=sas_uri_expiration_time_in_minutes, template_url=self.retrieve_boot_diagnostics_data.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('RetrieveBootDiagnosticsDataResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized retrieve_boot_diagnostics_data.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/retrieveBootDiagnosticsData'} # type: ignore async def _perform_maintenance_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_perform_maintenance_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._perform_maintenance_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _perform_maintenance_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance'} # type: ignore @distributed_trace_async async def begin_perform_maintenance( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: """The operation to perform maintenance on a virtual machine. :param resource_group_name: The name of the resource group. 
:type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._perform_maintenance_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_perform_maintenance.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance'} # type: ignore @distributed_trace_async async def simulate_eviction( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: """The operation to simulate the eviction of spot virtual machine. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. 
:type vm_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_simulate_eviction_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self.simulate_eviction.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) simulate_eviction.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/simulateEviction'} # type: ignore async def _assess_patches_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> Optional["_models.VirtualMachineAssessPatchesResult"]: cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualMachineAssessPatchesResult"]] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_assess_patches_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._assess_patches_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = None if response.status_code == 200: deserialized = self._deserialize('VirtualMachineAssessPatchesResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _assess_patches_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches'} # type: ignore @distributed_trace_async async def begin_assess_patches( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller["_models.VirtualMachineAssessPatchesResult"]: """Assess patches on the VM. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. 
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either VirtualMachineAssessPatchesResult or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineAssessPatchesResult] :raises: ~azure.core.exceptions.HttpResponseError """ polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineAssessPatchesResult"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._assess_patches_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('VirtualMachineAssessPatchesResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_assess_patches.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches'} # type: ignore async def _install_patches_initial( self, resource_group_name: str, vm_name: str, install_patches_input: "_models.VirtualMachineInstallPatchesParameters", **kwargs: Any ) -> Optional["_models.VirtualMachineInstallPatchesResult"]: cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualMachineInstallPatchesResult"]] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] _json = self._serialize.body(install_patches_input, 'VirtualMachineInstallPatchesParameters') request = build_install_patches_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._install_patches_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = None if response.status_code == 200: deserialized = self._deserialize('VirtualMachineInstallPatchesResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _install_patches_initial.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches'} # type: ignore @distributed_trace_async async def begin_install_patches( self, resource_group_name: str, vm_name: str, install_patches_input: "_models.VirtualMachineInstallPatchesParameters", **kwargs: Any ) -> AsyncLROPoller["_models.VirtualMachineInstallPatchesResult"]: """Installs patches on the VM. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :param install_patches_input: Input for InstallPatches as directly received by the API. :type install_patches_input: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineInstallPatchesParameters :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either VirtualMachineInstallPatchesResult or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineInstallPatchesResult] :raises: ~azure.core.exceptions.HttpResponseError """ content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineInstallPatchesResult"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._install_patches_initial( resource_group_name=resource_group_name, vm_name=vm_name, install_patches_input=install_patches_input, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('VirtualMachineInstallPatchesResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_install_patches.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches'} # type: ignore async def _run_command_initial( self, resource_group_name: str, vm_name: str, parameters: "_models.RunCommandInput", **kwargs: Any ) -> Optional["_models.RunCommandResult"]: cls = kwargs.pop('cls', None) # type: 
ClsType[Optional["_models.RunCommandResult"]] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] _json = self._serialize.body(parameters, 'RunCommandInput') request = build_run_command_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._run_command_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = None if response.status_code == 200: deserialized = self._deserialize('RunCommandResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _run_command_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand'} # type: ignore @distributed_trace_async async def begin_run_command( self, resource_group_name: str, vm_name: str, parameters: "_models.RunCommandInput", **kwargs: Any ) -> AsyncLROPoller["_models.RunCommandResult"]: """Run command on the VM. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param vm_name: The name of the virtual machine. :type vm_name: str :param parameters: Parameters supplied to the Run command operation. :type parameters: ~azure.mgmt.compute.v2021_04_01.models.RunCommandInput :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.RunCommandResult] :raises: ~azure.core.exceptions.HttpResponseError """ content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.RunCommandResult"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._run_command_initial( resource_group_name=resource_group_name, vm_name=vm_name, parameters=parameters, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('RunCommandResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_run_command.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand'} # type: ignore
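# --- Illustrative usage sketch (editorial addition; not part of the generated file above) ---
# The VirtualMachinesOperations class shown above is normally reached through the async
# ComputeManagementClient rather than instantiated directly. The snippet below is a minimal,
# hedged example of driving two of its long-running operations (begin_power_off / begin_start);
# the ComputeManagementClient and DefaultAzureCredential classes come from the wider SDK and
# azure-identity package (not shown in this record), and the subscription, resource group, and
# VM names are placeholder assumptions.
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.compute.aio import ComputeManagementClient


async def power_cycle_vm() -> None:
    # Placeholder identifiers: replace with real values.
    subscription_id = "<subscription-id>"
    resource_group = "<resource-group>"
    vm_name = "<vm-name>"

    async with DefaultAzureCredential() as credential:
        async with ComputeManagementClient(credential, subscription_id) as client:
            # begin_power_off is a coroutine that returns an AsyncLROPoller; awaiting
            # poller.result() waits for the long-running operation to finish
            # (polled via AsyncARMPolling, as in the operations above).
            poller = await client.virtual_machines.begin_power_off(resource_group, vm_name)
            await poller.result()

            # Start the VM again with another LRO poller.
            poller = await client.virtual_machines.begin_start(resource_group, vm_name)
            await poller.result()


if __name__ == "__main__":
    asyncio.run(power_cycle_vm())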
46.167319
873
0.668468
import functools from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models from ..._vendor import _convert_request from ...operations._virtual_machines_operations import build_assess_patches_request_initial, build_capture_request_initial, build_convert_to_managed_disks_request_initial, build_create_or_update_request_initial, build_deallocate_request_initial, build_delete_request_initial, build_generalize_request, build_get_request, build_install_patches_request_initial, build_instance_view_request, build_list_all_request, build_list_available_sizes_request, build_list_by_location_request, build_list_request, build_perform_maintenance_request_initial, build_power_off_request_initial, build_reapply_request_initial, build_redeploy_request_initial, build_reimage_request_initial, build_restart_request_initial, build_retrieve_boot_diagnostics_data_request, build_run_command_request_initial, build_simulate_eviction_request, build_start_request_initial, build_update_request_initial T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class VirtualMachinesOperations: models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace def list_by_location( self, location: str, **kwargs: Any ) -> AsyncIterable["_models.VirtualMachineListResult"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_by_location_request( location=location, subscription_id=self._config.subscription_id, template_url=self.list_by_location.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_by_location_request( location=location, subscription_id=self._config.subscription_id, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("VirtualMachineListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, 
error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachines'} async def _capture_initial( self, resource_group_name: str, vm_name: str, parameters: "_models.VirtualMachineCaptureParameters", **kwargs: Any ) -> Optional["_models.VirtualMachineCaptureResult"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") _json = self._serialize.body(parameters, 'VirtualMachineCaptureParameters') request = build_capture_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._capture_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = None if response.status_code == 200: deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'} @distributed_trace_async async def begin_capture( self, resource_group_name: str, vm_name: str, parameters: "_models.VirtualMachineCaptureParameters", **kwargs: Any ) -> AsyncLROPoller["_models.VirtualMachineCaptureResult"]: content_type = kwargs.pop('content_type', "application/json") polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._capture_initial( resource_group_name=resource_group_name, vm_name=vm_name, parameters=parameters, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'} async def _create_or_update_initial( self, resource_group_name: str, vm_name: 
str, parameters: "_models.VirtualMachine", **kwargs: Any ) -> "_models.VirtualMachine": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") _json = self._serialize.body(parameters, 'VirtualMachine') request = build_create_or_update_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._create_or_update_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('VirtualMachine', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('VirtualMachine', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} @distributed_trace_async async def begin_create_or_update( self, resource_group_name: str, vm_name: str, parameters: "_models.VirtualMachine", **kwargs: Any ) -> AsyncLROPoller["_models.VirtualMachine"]: content_type = kwargs.pop('content_type', "application/json") polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._create_or_update_initial( resource_group_name=resource_group_name, vm_name=vm_name, parameters=parameters, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('VirtualMachine', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} async def _update_initial( self, resource_group_name: str, vm_name: str, parameters: "_models.VirtualMachineUpdate", **kwargs: Any ) -> "_models.VirtualMachine": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") _json = self._serialize.body(parameters, 'VirtualMachineUpdate') request = 
build_update_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._update_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('VirtualMachine', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} @distributed_trace_async async def begin_update( self, resource_group_name: str, vm_name: str, parameters: "_models.VirtualMachineUpdate", **kwargs: Any ) -> AsyncLROPoller["_models.VirtualMachine"]: content_type = kwargs.pop('content_type', "application/json") polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._update_initial( resource_group_name=resource_group_name, vm_name=vm_name, parameters=parameters, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('VirtualMachine', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} async def _delete_initial( self, resource_group_name: str, vm_name: str, force_deletion: Optional[bool] = None, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_delete_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, force_deletion=force_deletion, template_url=self._delete_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} @distributed_trace_async async def begin_delete( self, resource_group_name: str, vm_name: str, force_deletion: Optional[bool] = None, **kwargs: Any ) -> AsyncLROPoller[None]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, vm_name=vm_name, force_deletion=force_deletion, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} @distributed_trace_async async def get( self, resource_group_name: str, vm_name: str, expand: Optional[Union[str, "_models.InstanceViewTypes"]] = None, **kwargs: Any ) -> "_models.VirtualMachine": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, expand=expand, template_url=self.get.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('VirtualMachine', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} @distributed_trace_async async def instance_view( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> "_models.VirtualMachineInstanceView": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_instance_view_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self.instance_view.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise 
HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('VirtualMachineInstanceView', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized instance_view.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/instanceView'} async def _convert_to_managed_disks_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_convert_to_managed_disks_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._convert_to_managed_disks_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _convert_to_managed_disks_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks'} @distributed_trace_async async def begin_convert_to_managed_disks( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._convert_to_managed_disks_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_convert_to_managed_disks.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks'} async def _deallocate_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_deallocate_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._deallocate_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = 
pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _deallocate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'} @distributed_trace_async async def begin_deallocate( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._deallocate_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_deallocate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'} @distributed_trace_async async def generalize( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_generalize_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self.generalize.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) generalize.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize'} @distributed_trace def list( self, resource_group_name: str, **kwargs: Any ) -> AsyncIterable["_models.VirtualMachineListResult"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_request( resource_group_name=resource_group_name, subscription_id=self._config.subscription_id, template_url=self.list.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_request( resource_group_name=resource_group_name, subscription_id=self._config.subscription_id, 
template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("VirtualMachineListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines'} @distributed_trace def list_all( self, status_only: Optional[str] = None, **kwargs: Any ) -> AsyncIterable["_models.VirtualMachineListResult"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_all_request( subscription_id=self._config.subscription_id, status_only=status_only, template_url=self.list_all.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_all_request( subscription_id=self._config.subscription_id, status_only=status_only, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("VirtualMachineListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines'} @distributed_trace def list_available_sizes( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncIterable["_models.VirtualMachineSizeListResult"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_available_sizes_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self.list_available_sizes.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_available_sizes_request( resource_group_name=resource_group_name, vm_name=vm_name, 
subscription_id=self._config.subscription_id, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request async def extract_data(pipeline_response): deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_available_sizes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes'} async def _power_off_initial( self, resource_group_name: str, vm_name: str, skip_shutdown: Optional[bool] = False, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_power_off_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, skip_shutdown=skip_shutdown, template_url=self._power_off_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _power_off_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'} @distributed_trace_async async def begin_power_off( self, resource_group_name: str, vm_name: str, skip_shutdown: Optional[bool] = False, **kwargs: Any ) -> AsyncLROPoller[None]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._power_off_initial( resource_group_name=resource_group_name, vm_name=vm_name, skip_shutdown=skip_shutdown, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_power_off.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'} async def _reapply_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_reapply_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._reapply_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _reapply_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply'} @distributed_trace_async async def begin_reapply( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._reapply_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_reapply.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply'} async def _restart_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_restart_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._restart_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _restart_initial.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'} @distributed_trace_async async def begin_restart( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._restart_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'} async def _start_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_start_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._start_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'} @distributed_trace_async async def begin_start( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._start_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_start.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'} async def _redeploy_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_redeploy_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._redeploy_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _redeploy_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'} @distributed_trace_async async def begin_redeploy( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._redeploy_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_redeploy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'} async def _reimage_initial( self, resource_group_name: str, vm_name: str, parameters: Optional["_models.VirtualMachineReimageParameters"] = None, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") if parameters is not None: _json = self._serialize.body(parameters, 'VirtualMachineReimageParameters') else: _json = None request = build_reimage_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._reimage_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, 
response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _reimage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage'} @distributed_trace_async async def begin_reimage( self, resource_group_name: str, vm_name: str, parameters: Optional["_models.VirtualMachineReimageParameters"] = None, **kwargs: Any ) -> AsyncLROPoller[None]: content_type = kwargs.pop('content_type', "application/json") polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._reimage_initial( resource_group_name=resource_group_name, vm_name=vm_name, parameters=parameters, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage'} @distributed_trace_async async def retrieve_boot_diagnostics_data( self, resource_group_name: str, vm_name: str, sas_uri_expiration_time_in_minutes: Optional[int] = None, **kwargs: Any ) -> "_models.RetrieveBootDiagnosticsDataResult": cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_retrieve_boot_diagnostics_data_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, sas_uri_expiration_time_in_minutes=sas_uri_expiration_time_in_minutes, template_url=self.retrieve_boot_diagnostics_data.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('RetrieveBootDiagnosticsDataResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized retrieve_boot_diagnostics_data.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/retrieveBootDiagnosticsData'} async def _perform_maintenance_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = 
build_perform_maintenance_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._perform_maintenance_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _perform_maintenance_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance'} @distributed_trace_async async def begin_perform_maintenance( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller[None]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._perform_maintenance_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_perform_maintenance.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance'} @distributed_trace_async async def simulate_eviction( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_simulate_eviction_request( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self.simulate_eviction.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) simulate_eviction.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/simulateEviction'} async def _assess_patches_initial( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> Optional["_models.VirtualMachineAssessPatchesResult"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: 
ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_assess_patches_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, template_url=self._assess_patches_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = None if response.status_code == 200: deserialized = self._deserialize('VirtualMachineAssessPatchesResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _assess_patches_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches'} @distributed_trace_async async def begin_assess_patches( self, resource_group_name: str, vm_name: str, **kwargs: Any ) -> AsyncLROPoller["_models.VirtualMachineAssessPatchesResult"]: polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._assess_patches_initial( resource_group_name=resource_group_name, vm_name=vm_name, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('VirtualMachineAssessPatchesResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_assess_patches.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches'} async def _install_patches_initial( self, resource_group_name: str, vm_name: str, install_patches_input: "_models.VirtualMachineInstallPatchesParameters", **kwargs: Any ) -> Optional["_models.VirtualMachineInstallPatchesResult"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") _json = self._serialize.body(install_patches_input, 'VirtualMachineInstallPatchesParameters') request = build_install_patches_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._install_patches_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = None if response.status_code == 200: deserialized = self._deserialize('VirtualMachineInstallPatchesResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _install_patches_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches'} @distributed_trace_async async def begin_install_patches( self, resource_group_name: str, vm_name: str, install_patches_input: "_models.VirtualMachineInstallPatchesParameters", **kwargs: Any ) -> AsyncLROPoller["_models.VirtualMachineInstallPatchesResult"]: content_type = kwargs.pop('content_type', "application/json") polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._install_patches_initial( resource_group_name=resource_group_name, vm_name=vm_name, install_patches_input=install_patches_input, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('VirtualMachineInstallPatchesResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_install_patches.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches'} async def _run_command_initial( self, resource_group_name: str, vm_name: str, parameters: "_models.RunCommandInput", **kwargs: Any ) -> Optional["_models.RunCommandResult"]: cls = kwargs.pop('cls', None) error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") _json = self._serialize.body(parameters, 'RunCommandInput') request = build_run_command_request_initial( resource_group_name=resource_group_name, vm_name=vm_name, subscription_id=self._config.subscription_id, content_type=content_type, json=_json, template_url=self._run_command_initial.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) 
deserialized = None if response.status_code == 200: deserialized = self._deserialize('RunCommandResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _run_command_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand'} @distributed_trace_async async def begin_run_command( self, resource_group_name: str, vm_name: str, parameters: "_models.RunCommandInput", **kwargs: Any ) -> AsyncLROPoller["_models.RunCommandResult"]: content_type = kwargs.pop('content_type', "application/json") polling = kwargs.pop('polling', True) cls = kwargs.pop('cls', None) lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) if cont_token is None: raw_result = await self._run_command_initial( resource_group_name=resource_group_name, vm_name=vm_name, parameters=parameters, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('RunCommandResult', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_run_command.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand'}
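The begin_* helpers in the stripped content above all accept a continuation_token keyword, and the returned pollers expose continuation_token(), so a caller can persist an in-flight operation and resume it later. A hypothetical sketch of that pattern, assuming client is an already-constructed async ComputeManagementClient and the other arguments are placeholders:

async def start_and_persist(client, group, vm, params):
    # Kick off the LRO and hand back a serializable token instead of waiting.
    poller = await client.virtual_machines.begin_run_command(group, vm, params)
    return poller.continuation_token()


async def resume_and_wait(client, group, vm, params, token):
    # Rebuild the poller from the stored token and wait for the final result.
    poller = await client.virtual_machines.begin_run_command(
        group, vm, params, continuation_token=token
    )
    return await poller.result()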
true
true
f7248f210691f99763f6311365ed9eb869ed4aa4
20,439
py
Python
mlrun/runtimes/pod.py
Michaelliv/mlrun
f155836f71e86cfcc573bcf1aa35762d72feeb5a
[ "Apache-2.0" ]
null
null
null
mlrun/runtimes/pod.py
Michaelliv/mlrun
f155836f71e86cfcc573bcf1aa35762d72feeb5a
[ "Apache-2.0" ]
null
null
null
mlrun/runtimes/pod.py
Michaelliv/mlrun
f155836f71e86cfcc573bcf1aa35762d72feeb5a
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 Iguazio # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import typing import uuid from enum import Enum from kfp.dsl import ContainerOp, _container_op from kubernetes import client import mlrun.errors import mlrun.utils.regex from ..config import config as mlconf from ..utils import logger, normalize_name, update_in, verify_field_regex from .base import BaseRuntime, FunctionSpec from .utils import ( apply_kfp, generate_resources, get_item_name, get_resource_labels, set_named_item, ) class KubeResourceSpec(FunctionSpec): def __init__( self, command=None, args=None, image=None, mode=None, volumes=None, volume_mounts=None, env=None, resources=None, default_handler=None, pythonpath=None, entry_points=None, description=None, workdir=None, replicas=None, image_pull_policy=None, service_account=None, build=None, image_pull_secret=None, node_name=None, node_selector=None, affinity=None, mount_applied=False, priority_class_name=None, ): super().__init__( command=command, args=args, image=image, mode=mode, build=build, entry_points=entry_points, description=description, workdir=workdir, default_handler=default_handler, pythonpath=pythonpath, mount_applied=mount_applied, ) self._volumes = {} self._volume_mounts = {} self.volumes = volumes or [] self.volume_mounts = volume_mounts or [] self.env = env or [] self.resources = resources or {} self.replicas = replicas self.image_pull_policy = image_pull_policy self.service_account = service_account self.image_pull_secret = image_pull_secret self.node_name = node_name self.node_selector = ( node_selector or mlrun.mlconf.get_default_function_node_selector() ) self._affinity = affinity self.priority_class_name = ( priority_class_name or mlrun.mlconf.default_function_priority_class_name ) @property def volumes(self) -> list: return list(self._volumes.values()) @volumes.setter def volumes(self, volumes): self._volumes = {} if volumes: for vol in volumes: set_named_item(self._volumes, vol) @property def volume_mounts(self) -> list: return list(self._volume_mounts.values()) @volume_mounts.setter def volume_mounts(self, volume_mounts): self._volume_mounts = {} if volume_mounts: for volume_mount in volume_mounts: self._set_volume_mount(volume_mount) @property def affinity(self) -> client.V1Affinity: return self._affinity @affinity.setter def affinity(self, affinity): self._affinity = self._transform_affinity_to_k8s_class_instance(affinity) def to_dict(self, fields=None, exclude=None): struct = super().to_dict(fields, exclude=["affinity"]) api = client.ApiClient() struct["affinity"] = api.sanitize_for_serialization(self.affinity) return struct def update_vols_and_mounts(self, volumes, volume_mounts): if volumes: for vol in volumes: set_named_item(self._volumes, vol) if volume_mounts: for volume_mount in volume_mounts: self._set_volume_mount(volume_mount) def _get_affinity_as_k8s_class_instance(self): pass def _transform_affinity_to_k8s_class_instance(self, affinity): if not affinity: return None if isinstance(affinity, dict): api = client.ApiClient() # 
not ideal to use their private method, but looks like that's the only option # Taken from https://github.com/kubernetes-client/python/issues/977 affinity = api._ApiClient__deserialize(affinity, "V1Affinity") return affinity def _get_sanitized_affinity(self): """ When using methods like to_dict() on kubernetes class instances we're getting the attributes in snake_case Which is ok if we're using the kubernetes python package but not if for example we're creating CRDs that we apply directly. For that we need the sanitized (CamelCase) version. """ if not self.affinity: return {} if isinstance(self.affinity, dict): # heuristic - if node_affinity is part of the dict it means to_dict on the kubernetes object performed, # there's nothing we can do at that point to transform it to the sanitized version if "node_affinity" in self.affinity: raise mlrun.errors.MLRunInvalidArgumentError( "Affinity must be instance of kubernetes' V1Affinity class" ) elif "nodeAffinity" in self.affinity: # then it's already the sanitized version return self.affinity api = client.ApiClient() return api.sanitize_for_serialization(self.affinity) def _set_volume_mount(self, volume_mount): # calculate volume mount hash volume_name = get_item_name(volume_mount, "name") volume_sub_path = get_item_name(volume_mount, "subPath") volume_mount_path = get_item_name(volume_mount, "mountPath") volume_mount_key = hash(f"{volume_name}-{volume_sub_path}-{volume_mount_path}") self._volume_mounts[volume_mount_key] = volume_mount class AutoMountType(str, Enum): none = "none" auto = "auto" v3io_credentials = "v3io_credentials" v3io_fuse = "v3io_fuse" pvc = "pvc" @classmethod def _missing_(cls, value): return AutoMountType.default() @staticmethod def default(): return AutoMountType.auto # Any modifier that configures a mount on a runtime should be included here. These modifiers, if applied to the # runtime, will suppress the auto-mount functionality. 
@classmethod def all_mount_modifiers(cls): return [ mlrun.v3io_cred.__name__, mlrun.mount_v3io.__name__, mlrun.platforms.other.mount_pvc.__name__, mlrun.auto_mount.__name__, ] @staticmethod def _get_auto_modifier(): # If we're running on Iguazio - use v3io_cred if mlconf.igz_version != "": return mlrun.v3io_cred # Else, either pvc mount if it's configured or do nothing otherwise pvc_configured = ( "MLRUN_PVC_MOUNT" in os.environ or "pvc_name" in mlconf.get_storage_auto_mount_params() ) return mlrun.platforms.other.mount_pvc if pvc_configured else None def get_modifier(self): return { AutoMountType.none: None, AutoMountType.v3io_credentials: mlrun.v3io_cred, AutoMountType.v3io_fuse: mlrun.mount_v3io, AutoMountType.pvc: mlrun.platforms.other.mount_pvc, AutoMountType.auto: self._get_auto_modifier(), }[self] class KubeResource(BaseRuntime): kind = "job" _is_nested = True def __init__(self, spec=None, metadata=None): super().__init__(metadata, spec) self.verbose = False @property def spec(self) -> KubeResourceSpec: return self._spec @spec.setter def spec(self, spec): self._spec = self._verify_dict(spec, "spec", KubeResourceSpec) def to_dict(self, fields=None, exclude=None, strip=False): struct = super().to_dict(fields, exclude, strip=strip) api = client.ApiClient() struct = api.sanitize_for_serialization(struct) if strip: spec = struct["spec"] for attr in ["volumes", "volume_mounts"]: if attr in spec: del spec[attr] if "env" in spec and spec["env"]: for ev in spec["env"]: if ev["name"].startswith("V3IO_"): ev["value"] = "" return struct def apply(self, modify): # Kubeflow pipeline have a hook to add the component to the DAG on ContainerOp init # we remove the hook to suppress kubeflow op registration and return it after the apply() old_op_handler = _container_op._register_op_handler _container_op._register_op_handler = lambda x: self.metadata.name cop = ContainerOp("name", "image") _container_op._register_op_handler = old_op_handler return apply_kfp(modify, cop, self) def set_env_from_secret(self, name, secret=None, secret_key=None): """set pod environment var from secret""" secret_key = secret_key or name value_from = client.V1EnvVarSource( secret_key_ref=client.V1SecretKeySelector(name=secret, key=secret_key) ) return self._set_env(name, value_from=value_from) def set_env(self, name, value): """set pod environment var from value""" return self._set_env(name, value=str(value)) def is_env_exists(self, name): """Check whether there is an environment variable define for the given key""" for env_var in self.spec.env: if get_item_name(env_var) == name: return True return False def _set_env(self, name, value=None, value_from=None): new_var = client.V1EnvVar(name=name, value=value, value_from=value_from) i = 0 for v in self.spec.env: if get_item_name(v) == name: self.spec.env[i] = new_var return self i += 1 self.spec.env.append(new_var) return self def set_envs(self, env_vars): """set pod environment var key/value dict""" for name, value in env_vars.items(): self.set_env(name, value) return self def gpus(self, gpus, gpu_type="nvidia.com/gpu"): update_in(self.spec.resources, ["limits", gpu_type], gpus) def with_limits(self, mem=None, cpu=None, gpus=None, gpu_type="nvidia.com/gpu"): """set pod cpu/memory/gpu limits""" self._verify_and_set_limits("resources", mem, cpu, gpus, gpu_type) def with_requests(self, mem=None, cpu=None): """set requested (desired) pod cpu/memory resources""" self._verify_and_set_requests("resources", mem, cpu) def with_node_selection( self, node_name: typing.Optional[str] = 
None, node_selector: typing.Optional[typing.Dict[str, str]] = None, affinity: typing.Optional[client.V1Affinity] = None, ): """ Enables to control on which k8s node the job will run :param node_name: The name of the k8s node :param node_selector: Label selector, only nodes with matching labels will be eligible to be picked :param affinity: Expands the types of constraints you can express - see https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity for details """ if node_name: self.spec.node_name = node_name if node_selector: self.spec.node_selector = node_selector if affinity: self.spec.affinity = affinity def with_priority_class(self, name: typing.Optional[str] = None): """ Enables to control the priority of the pod If not passed - will default to mlrun.mlconf.default_function_priority_class_name :param name: The name of the priority class """ if name is None: name = mlconf.default_function_priority_class_name valid_priority_class_names = self.list_valid_and_default_priority_class_names()[ "valid_function_priority_class_names" ] if name not in valid_priority_class_names: message = "Priority class name not in available priority class names" logger.warning( message, priority_class_name=name, valid_priority_class_names=valid_priority_class_names, ) raise mlrun.errors.MLRunInvalidArgumentError(message) self.spec.priority_class_name = name def list_valid_and_default_priority_class_names(self): return { "default_function_priority_class_name": mlconf.default_function_priority_class_name, "valid_function_priority_class_names": mlconf.get_valid_function_priority_class_names(), } def _verify_and_set_limits( self, resources_field_name, mem=None, cpu=None, gpus=None, gpu_type="nvidia.com/gpu", ): if mem: verify_field_regex( f"function.spec.{resources_field_name}.limits.memory", mem, mlrun.utils.regex.k8s_resource_quantity_regex, ) if cpu: verify_field_regex( f"function.spec.{resources_field_name}.limits.cpu", cpu, mlrun.utils.regex.k8s_resource_quantity_regex, ) if gpus: verify_field_regex( f"function.spec.{resources_field_name}.limits.gpus", gpus, mlrun.utils.regex.k8s_resource_quantity_regex, ) update_in( getattr(self.spec, resources_field_name), "limits", generate_resources(mem=mem, cpu=cpu, gpus=gpus, gpu_type=gpu_type), ) def _verify_and_set_requests(self, resources_field_name, mem=None, cpu=None): if mem: verify_field_regex( f"function.spec.{resources_field_name}.requests.memory", mem, mlrun.utils.regex.k8s_resource_quantity_regex, ) if cpu: verify_field_regex( f"function.spec.{resources_field_name}.requests.cpu", cpu, mlrun.utils.regex.k8s_resource_quantity_regex, ) update_in( getattr(self.spec, resources_field_name), "requests", generate_resources(mem=mem, cpu=cpu), ) def _get_meta(self, runobj, unique=False): namespace = self._get_k8s().resolve_namespace() labels = get_resource_labels(self, runobj, runobj.spec.scrape_metrics) new_meta = client.V1ObjectMeta(namespace=namespace, labels=labels) name = runobj.metadata.name or "mlrun" norm_name = f"{normalize_name(name)}-" if unique: norm_name += uuid.uuid4().hex[:8] new_meta.name = norm_name runobj.set_label("mlrun/job", norm_name) else: new_meta.generate_name = norm_name return new_meta def _add_azure_vault_params_to_spec(self, k8s_secret_name=None): secret_name = ( k8s_secret_name or mlconf.secret_stores.azure_vault.default_secret_name ) if not secret_name: logger.warning( "No k8s secret provided. 
Azure key vault will not be available" ) return # We cannot use expanduser() here, since the user in question is the user running in the pod # itself (which is root) and not where this code is running. That's why this hacky replacement is needed. secret_path = mlconf.secret_stores.azure_vault.secret_path.replace("~", "/root") volumes = [ { "name": "azure-vault-secret", "secret": {"defaultMode": 420, "secretName": secret_name}, } ] volume_mounts = [{"name": "azure-vault-secret", "mountPath": secret_path}] self.spec.update_vols_and_mounts(volumes, volume_mounts) def _add_project_k8s_secrets_to_spec(self, secrets, runobj=None, project=None): project_name = project or runobj.metadata.project if project_name is None: logger.warning("No project provided. Cannot add k8s secrets") return secret_name = self._get_k8s().get_project_secret_name(project_name) existing_secret_keys = ( self._get_k8s().get_project_secret_keys(project_name) or {} ) # If no secrets were passed, we need all existing keys if not secrets: secrets = { key: self._secrets.k8s_env_variable_name_for_secret(key) for key in existing_secret_keys } for key, env_var_name in secrets.items(): if key in existing_secret_keys: self.set_env_from_secret(env_var_name, secret_name, key) def _add_vault_params_to_spec(self, runobj=None, project=None): project_name = project or runobj.metadata.project if project_name is None: logger.warning("No project provided. Cannot add vault parameters") return service_account_name = mlconf.secret_stores.vault.project_service_account_name.format( project=project_name ) project_vault_secret_name = self._get_k8s().get_project_vault_secret_name( project_name, service_account_name ) if project_vault_secret_name is None: logger.info(f"No vault secret associated with project {project_name}") return volumes = [ { "name": "vault-secret", "secret": {"defaultMode": 420, "secretName": project_vault_secret_name}, } ] # We cannot use expanduser() here, since the user in question is the user running in the pod # itself (which is root) and not where this code is running. That's why this hacky replacement is needed. token_path = mlconf.secret_stores.vault.token_path.replace("~", "/root") volume_mounts = [{"name": "vault-secret", "mountPath": token_path}] self.spec.update_vols_and_mounts(volumes, volume_mounts) self.spec.env.append( { "name": "MLRUN_SECRET_STORES__VAULT__ROLE", "value": f"project:{project_name}", } ) # In case remote URL is different than local URL, use it. 
Else, use the local URL vault_url = mlconf.secret_stores.vault.remote_url if vault_url == "": vault_url = mlconf.secret_stores.vault.url self.spec.env.append( {"name": "MLRUN_SECRET_STORES__VAULT__URL", "value": vault_url} ) def try_auto_mount_based_on_config(self): if self.spec.mount_applied: logger.debug("Mount already applied - not performing auto-mount") return auto_mount_type = AutoMountType(mlconf.storage.auto_mount_type) modifier = auto_mount_type.get_modifier() if not modifier: logger.debug("Auto mount disabled due to user selection") return mount_params_dict = mlconf.get_storage_auto_mount_params() self.apply(modifier(**mount_params_dict)) def kube_resource_spec_to_pod_spec( kube_resource_spec: KubeResourceSpec, container: client.V1Container ): return client.V1PodSpec( containers=[container], restart_policy="Never", volumes=kube_resource_spec.volumes, service_account=kube_resource_spec.service_account, node_name=kube_resource_spec.node_name, node_selector=kube_resource_spec.node_selector, affinity=kube_resource_spec.affinity, priority_class_name=kube_resource_spec.priority_class_name if len(mlconf.get_valid_function_priority_class_names()) else None, )
36.563506
131
0.632467
import os import typing import uuid from enum import Enum from kfp.dsl import ContainerOp, _container_op from kubernetes import client import mlrun.errors import mlrun.utils.regex from ..config import config as mlconf from ..utils import logger, normalize_name, update_in, verify_field_regex from .base import BaseRuntime, FunctionSpec from .utils import ( apply_kfp, generate_resources, get_item_name, get_resource_labels, set_named_item, ) class KubeResourceSpec(FunctionSpec): def __init__( self, command=None, args=None, image=None, mode=None, volumes=None, volume_mounts=None, env=None, resources=None, default_handler=None, pythonpath=None, entry_points=None, description=None, workdir=None, replicas=None, image_pull_policy=None, service_account=None, build=None, image_pull_secret=None, node_name=None, node_selector=None, affinity=None, mount_applied=False, priority_class_name=None, ): super().__init__( command=command, args=args, image=image, mode=mode, build=build, entry_points=entry_points, description=description, workdir=workdir, default_handler=default_handler, pythonpath=pythonpath, mount_applied=mount_applied, ) self._volumes = {} self._volume_mounts = {} self.volumes = volumes or [] self.volume_mounts = volume_mounts or [] self.env = env or [] self.resources = resources or {} self.replicas = replicas self.image_pull_policy = image_pull_policy self.service_account = service_account self.image_pull_secret = image_pull_secret self.node_name = node_name self.node_selector = ( node_selector or mlrun.mlconf.get_default_function_node_selector() ) self._affinity = affinity self.priority_class_name = ( priority_class_name or mlrun.mlconf.default_function_priority_class_name ) @property def volumes(self) -> list: return list(self._volumes.values()) @volumes.setter def volumes(self, volumes): self._volumes = {} if volumes: for vol in volumes: set_named_item(self._volumes, vol) @property def volume_mounts(self) -> list: return list(self._volume_mounts.values()) @volume_mounts.setter def volume_mounts(self, volume_mounts): self._volume_mounts = {} if volume_mounts: for volume_mount in volume_mounts: self._set_volume_mount(volume_mount) @property def affinity(self) -> client.V1Affinity: return self._affinity @affinity.setter def affinity(self, affinity): self._affinity = self._transform_affinity_to_k8s_class_instance(affinity) def to_dict(self, fields=None, exclude=None): struct = super().to_dict(fields, exclude=["affinity"]) api = client.ApiClient() struct["affinity"] = api.sanitize_for_serialization(self.affinity) return struct def update_vols_and_mounts(self, volumes, volume_mounts): if volumes: for vol in volumes: set_named_item(self._volumes, vol) if volume_mounts: for volume_mount in volume_mounts: self._set_volume_mount(volume_mount) def _get_affinity_as_k8s_class_instance(self): pass def _transform_affinity_to_k8s_class_instance(self, affinity): if not affinity: return None if isinstance(affinity, dict): api = client.ApiClient() # Taken from https://github.com/kubernetes-client/python/issues/977 affinity = api._ApiClient__deserialize(affinity, "V1Affinity") return affinity def _get_sanitized_affinity(self): if not self.affinity: return {} if isinstance(self.affinity, dict): # heuristic - if node_affinity is part of the dict it means to_dict on the kubernetes object performed, # there's nothing we can do at that point to transform it to the sanitized version if "node_affinity" in self.affinity: raise mlrun.errors.MLRunInvalidArgumentError( "Affinity must be instance of kubernetes' 
V1Affinity class" ) elif "nodeAffinity" in self.affinity: # then it's already the sanitized version return self.affinity api = client.ApiClient() return api.sanitize_for_serialization(self.affinity) def _set_volume_mount(self, volume_mount): volume_name = get_item_name(volume_mount, "name") volume_sub_path = get_item_name(volume_mount, "subPath") volume_mount_path = get_item_name(volume_mount, "mountPath") volume_mount_key = hash(f"{volume_name}-{volume_sub_path}-{volume_mount_path}") self._volume_mounts[volume_mount_key] = volume_mount class AutoMountType(str, Enum): none = "none" auto = "auto" v3io_credentials = "v3io_credentials" v3io_fuse = "v3io_fuse" pvc = "pvc" @classmethod def _missing_(cls, value): return AutoMountType.default() @staticmethod def default(): return AutoMountType.auto @classmethod def all_mount_modifiers(cls): return [ mlrun.v3io_cred.__name__, mlrun.mount_v3io.__name__, mlrun.platforms.other.mount_pvc.__name__, mlrun.auto_mount.__name__, ] @staticmethod def _get_auto_modifier(): if mlconf.igz_version != "": return mlrun.v3io_cred # Else, either pvc mount if it's configured or do nothing otherwise pvc_configured = ( "MLRUN_PVC_MOUNT" in os.environ or "pvc_name" in mlconf.get_storage_auto_mount_params() ) return mlrun.platforms.other.mount_pvc if pvc_configured else None def get_modifier(self): return { AutoMountType.none: None, AutoMountType.v3io_credentials: mlrun.v3io_cred, AutoMountType.v3io_fuse: mlrun.mount_v3io, AutoMountType.pvc: mlrun.platforms.other.mount_pvc, AutoMountType.auto: self._get_auto_modifier(), }[self] class KubeResource(BaseRuntime): kind = "job" _is_nested = True def __init__(self, spec=None, metadata=None): super().__init__(metadata, spec) self.verbose = False @property def spec(self) -> KubeResourceSpec: return self._spec @spec.setter def spec(self, spec): self._spec = self._verify_dict(spec, "spec", KubeResourceSpec) def to_dict(self, fields=None, exclude=None, strip=False): struct = super().to_dict(fields, exclude, strip=strip) api = client.ApiClient() struct = api.sanitize_for_serialization(struct) if strip: spec = struct["spec"] for attr in ["volumes", "volume_mounts"]: if attr in spec: del spec[attr] if "env" in spec and spec["env"]: for ev in spec["env"]: if ev["name"].startswith("V3IO_"): ev["value"] = "" return struct def apply(self, modify): old_op_handler = _container_op._register_op_handler _container_op._register_op_handler = lambda x: self.metadata.name cop = ContainerOp("name", "image") _container_op._register_op_handler = old_op_handler return apply_kfp(modify, cop, self) def set_env_from_secret(self, name, secret=None, secret_key=None): secret_key = secret_key or name value_from = client.V1EnvVarSource( secret_key_ref=client.V1SecretKeySelector(name=secret, key=secret_key) ) return self._set_env(name, value_from=value_from) def set_env(self, name, value): return self._set_env(name, value=str(value)) def is_env_exists(self, name): for env_var in self.spec.env: if get_item_name(env_var) == name: return True return False def _set_env(self, name, value=None, value_from=None): new_var = client.V1EnvVar(name=name, value=value, value_from=value_from) i = 0 for v in self.spec.env: if get_item_name(v) == name: self.spec.env[i] = new_var return self i += 1 self.spec.env.append(new_var) return self def set_envs(self, env_vars): for name, value in env_vars.items(): self.set_env(name, value) return self def gpus(self, gpus, gpu_type="nvidia.com/gpu"): update_in(self.spec.resources, ["limits", gpu_type], gpus) def with_limits(self, 
mem=None, cpu=None, gpus=None, gpu_type="nvidia.com/gpu"): self._verify_and_set_limits("resources", mem, cpu, gpus, gpu_type) def with_requests(self, mem=None, cpu=None): self._verify_and_set_requests("resources", mem, cpu) def with_node_selection( self, node_name: typing.Optional[str] = None, node_selector: typing.Optional[typing.Dict[str, str]] = None, affinity: typing.Optional[client.V1Affinity] = None, ): if node_name: self.spec.node_name = node_name if node_selector: self.spec.node_selector = node_selector if affinity: self.spec.affinity = affinity def with_priority_class(self, name: typing.Optional[str] = None): if name is None: name = mlconf.default_function_priority_class_name valid_priority_class_names = self.list_valid_and_default_priority_class_names()[ "valid_function_priority_class_names" ] if name not in valid_priority_class_names: message = "Priority class name not in available priority class names" logger.warning( message, priority_class_name=name, valid_priority_class_names=valid_priority_class_names, ) raise mlrun.errors.MLRunInvalidArgumentError(message) self.spec.priority_class_name = name def list_valid_and_default_priority_class_names(self): return { "default_function_priority_class_name": mlconf.default_function_priority_class_name, "valid_function_priority_class_names": mlconf.get_valid_function_priority_class_names(), } def _verify_and_set_limits( self, resources_field_name, mem=None, cpu=None, gpus=None, gpu_type="nvidia.com/gpu", ): if mem: verify_field_regex( f"function.spec.{resources_field_name}.limits.memory", mem, mlrun.utils.regex.k8s_resource_quantity_regex, ) if cpu: verify_field_regex( f"function.spec.{resources_field_name}.limits.cpu", cpu, mlrun.utils.regex.k8s_resource_quantity_regex, ) if gpus: verify_field_regex( f"function.spec.{resources_field_name}.limits.gpus", gpus, mlrun.utils.regex.k8s_resource_quantity_regex, ) update_in( getattr(self.spec, resources_field_name), "limits", generate_resources(mem=mem, cpu=cpu, gpus=gpus, gpu_type=gpu_type), ) def _verify_and_set_requests(self, resources_field_name, mem=None, cpu=None): if mem: verify_field_regex( f"function.spec.{resources_field_name}.requests.memory", mem, mlrun.utils.regex.k8s_resource_quantity_regex, ) if cpu: verify_field_regex( f"function.spec.{resources_field_name}.requests.cpu", cpu, mlrun.utils.regex.k8s_resource_quantity_regex, ) update_in( getattr(self.spec, resources_field_name), "requests", generate_resources(mem=mem, cpu=cpu), ) def _get_meta(self, runobj, unique=False): namespace = self._get_k8s().resolve_namespace() labels = get_resource_labels(self, runobj, runobj.spec.scrape_metrics) new_meta = client.V1ObjectMeta(namespace=namespace, labels=labels) name = runobj.metadata.name or "mlrun" norm_name = f"{normalize_name(name)}-" if unique: norm_name += uuid.uuid4().hex[:8] new_meta.name = norm_name runobj.set_label("mlrun/job", norm_name) else: new_meta.generate_name = norm_name return new_meta def _add_azure_vault_params_to_spec(self, k8s_secret_name=None): secret_name = ( k8s_secret_name or mlconf.secret_stores.azure_vault.default_secret_name ) if not secret_name: logger.warning( "No k8s secret provided. 
Azure key vault will not be available" ) return secret_path = mlconf.secret_stores.azure_vault.secret_path.replace("~", "/root") volumes = [ { "name": "azure-vault-secret", "secret": {"defaultMode": 420, "secretName": secret_name}, } ] volume_mounts = [{"name": "azure-vault-secret", "mountPath": secret_path}] self.spec.update_vols_and_mounts(volumes, volume_mounts) def _add_project_k8s_secrets_to_spec(self, secrets, runobj=None, project=None): project_name = project or runobj.metadata.project if project_name is None: logger.warning("No project provided. Cannot add k8s secrets") return secret_name = self._get_k8s().get_project_secret_name(project_name) existing_secret_keys = ( self._get_k8s().get_project_secret_keys(project_name) or {} ) # If no secrets were passed, we need all existing keys if not secrets: secrets = { key: self._secrets.k8s_env_variable_name_for_secret(key) for key in existing_secret_keys } for key, env_var_name in secrets.items(): if key in existing_secret_keys: self.set_env_from_secret(env_var_name, secret_name, key) def _add_vault_params_to_spec(self, runobj=None, project=None): project_name = project or runobj.metadata.project if project_name is None: logger.warning("No project provided. Cannot add vault parameters") return service_account_name = mlconf.secret_stores.vault.project_service_account_name.format( project=project_name ) project_vault_secret_name = self._get_k8s().get_project_vault_secret_name( project_name, service_account_name ) if project_vault_secret_name is None: logger.info(f"No vault secret associated with project {project_name}") return volumes = [ { "name": "vault-secret", "secret": {"defaultMode": 420, "secretName": project_vault_secret_name}, } ] # We cannot use expanduser() here, since the user in question is the user running in the pod # itself (which is root) and not where this code is running. That's why this hacky replacement is needed. token_path = mlconf.secret_stores.vault.token_path.replace("~", "/root") volume_mounts = [{"name": "vault-secret", "mountPath": token_path}] self.spec.update_vols_and_mounts(volumes, volume_mounts) self.spec.env.append( { "name": "MLRUN_SECRET_STORES__VAULT__ROLE", "value": f"project:{project_name}", } ) vault_url = mlconf.secret_stores.vault.remote_url if vault_url == "": vault_url = mlconf.secret_stores.vault.url self.spec.env.append( {"name": "MLRUN_SECRET_STORES__VAULT__URL", "value": vault_url} ) def try_auto_mount_based_on_config(self): if self.spec.mount_applied: logger.debug("Mount already applied - not performing auto-mount") return auto_mount_type = AutoMountType(mlconf.storage.auto_mount_type) modifier = auto_mount_type.get_modifier() if not modifier: logger.debug("Auto mount disabled due to user selection") return mount_params_dict = mlconf.get_storage_auto_mount_params() self.apply(modifier(**mount_params_dict)) def kube_resource_spec_to_pod_spec( kube_resource_spec: KubeResourceSpec, container: client.V1Container ): return client.V1PodSpec( containers=[container], restart_policy="Never", volumes=kube_resource_spec.volumes, service_account=kube_resource_spec.service_account, node_name=kube_resource_spec.node_name, node_selector=kube_resource_spec.node_selector, affinity=kube_resource_spec.affinity, priority_class_name=kube_resource_spec.priority_class_name if len(mlconf.get_valid_function_priority_class_names()) else None, )
true
true
f7248f61ab2a1f4fb14a9ffc0272aea78e49b1af
7,677
py
Python
var/spack/repos/builtin/packages/libint/package.py
rickgcv/spack
4d4f7393a3522c1f690a2e9a9683bf0f8f3b43a6
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
var/spack/repos/builtin/packages/libint/package.py
rickgcv/spack
4d4f7393a3522c1f690a2e9a9683bf0f8f3b43a6
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
var/spack/repos/builtin/packages/libint/package.py
rickgcv/spack
4d4f7393a3522c1f690a2e9a9683bf0f8f3b43a6
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
1
2020-11-08T10:26:48.000Z
2020-11-08T10:26:48.000Z
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import os from spack import * TUNE_VARIANTS = ( 'none', 'cp2k-lmax-4', 'cp2k-lmax-5', 'cp2k-lmax-6', 'cp2k-lmax-7', 'molgw-lmax-4', 'molgw-lmax-5', 'molgw-lmax-6', 'molgw-lmax-7', ) class Libint(AutotoolsPackage): """Libint is a high-performance library for computing Gaussian integrals in quantum mechanics. """ homepage = "https://github.com/evaleev/libint" url = "https://github.com/evaleev/libint/archive/v2.1.0.tar.gz" version('2.6.0', sha256='4ae47e8f0b5632c3d2a956469a7920896708e9f0e396ec10071b8181e4c8d9fa') version('2.4.2', sha256='86dff38065e69a3a51d15cfdc638f766044cb87e5c6682d960c14f9847e2eac3') version('2.4.1', sha256='0513be124563fdbbc7cd3c7043e221df1bda236a037027ba9343429a27db8ce4') version('2.4.0', sha256='52eb16f065406099dcfaceb12f9a7f7e329c9cfcf6ed9bfacb0cff7431dd6019') version('2.2.0', sha256='f737d485f33ac819d7f28c6ce303b1f3a2296bfd2c14f7c1323f8c5d370bb0e3') version('2.1.0', sha256='43c453a1663aa1c55294df89ff9ece3aefc8d1bbba5ea31dbfe71b2d812e24c8') version('1.1.6', sha256='f201b0c621df678cfe8bdf3990796b8976ff194aba357ae398f2f29b0e2985a6') version('1.1.5', sha256='ec8cd4a4ba1e1a98230165210c293632372f0e573acd878ed62e5ec6f8b6174b') variant('fortran', default=False, description='Build & install Fortran bindings') variant('tune', default='none', multi=False, values=TUNE_VARIANTS, description='Tune libint for use with the given package') # Build dependencies depends_on('autoconf@2.52:', type='build') depends_on('automake', type='build') depends_on('libtool', type='build') # Libint 2 dependencies depends_on('boost', when='@2:') depends_on('gmp', when='@2:') for tvariant in TUNE_VARIANTS[1:]: conflicts('tune={0}'.format(tvariant), when='@:2.5.99', msg=('for versions prior to 2.6, tuning for specific' 'codes/configurations is not supported')) def url_for_version(self, version): base_url = "https://github.com/evaleev/libint/archive" if version == Version('1.0.0'): return "{0}/LIBINT_1_00.tar.gz".format(base_url) elif version < Version('2.1.0'): return "{0}/release-{1}.tar.gz".format(base_url, version.dashed) else: return "{0}/v{1}.tar.gz".format(base_url, version) def autoreconf(self, spec, prefix): libtoolize() aclocal('-I', 'lib/autoconf') autoconf() if '@2.6.0:' in spec: # skip tarball creation and removal of dir with generated code filter_file(r'^(export::.*)\s+tgz$', r'\1', 'export/Makefile') @property def optflags(self): flags = '-O2' # Optimizations for the Intel compiler, suggested by CP2K # See ../libxc/package.py for rationale and doc. 
if '%intel' in self.spec: flags += ' -xSSE4.2 -axAVX,CORE-AVX2 -ipo' return flags def setup_build_environment(self, env): # Set optimization flags env.set('CFLAGS', self.optflags) env.set('CXXFLAGS', self.optflags) # Change AR to xiar if we compile with Intel and we # find the executable if '%intel' in self.spec and which('xiar'): env.set('AR', 'xiar') def configure_args(self): config_args = ['--enable-shared'] optflags = self.optflags # Optimization flag names have changed in libint 2 if self.version < Version('2.0.0'): config_args.extend([ '--with-cc-optflags={0}'.format(optflags), '--with-cxx-optflags={0}'.format(optflags) ]) else: config_args.extend([ '--with-cxx-optflags={0}'.format(optflags), '--with-cxxgen-optflags={0}'.format(optflags) ]) # Options required by CP2K, removed in libint 2 if self.version < Version('2.0.0'): config_args.extend([ '--with-libint-max-am=5', '--with-libderiv-max-am1=4' ]) if '@2.6.0:' in self.spec: config_args += ['--with-libint-exportdir=generated'] tune_value = self.spec.variants['tune'].value if tune_value.startswith('cp2k'): lmax = int(tune_value.split('-lmax-')[1]) config_args += [ '--enable-eri=1', '--enable-eri2=1', '--enable-eri3=1', '--with-max-am={0}'.format(lmax), '--with-eri-max-am={0},{1}'.format(lmax, lmax - 1), '--with-eri2-max-am={0},{1}'.format(lmax + 2, lmax + 1), '--with-eri3-max-am={0},{1}'.format(lmax + 2, lmax + 1), '--with-opt-am=3', # keep code-size at an acceptable limit, # cf. https://github.com/evaleev/libint/wiki#program-specific-notes: '--enable-generic-code', '--disable-unrolling', ] if tune_value.startswith('molgw'): lmax = int(tune_value.split('-lmax-')[1]) config_args += [ '--enable-1body=1', '--enable-eri=0', '--enable-eri2=0', '--enable-eri3=0', '--with-multipole-max-order=0', '--with-max-am={0}'.format(lmax), '--with-eri-max-am={0}'.format(lmax), '--with-eri2-max-am={0}'.format(lmax), '--with-eri3-max-am={0}'.format(lmax), '--with-opt-am=2', '--enable-contracted-ints', # keep code-size at an acceptable limit, # cf. https://github.com/evaleev/libint/wiki#program-specific-notes: '--enable-generic-code', '--disable-unrolling', ] return config_args @property def build_targets(self): if '@2.6.0:' in self.spec: return ['export'] return [] @when('@2.6.0:') def install(self, spec, prefix): """ Starting from libint 2.6.0 we're using the 2-stage build to get support for the Fortran bindings, required by some packages (CP2K notably). """ # upstream says that using configure/make for the generated code # is deprecated and one should use CMake, but with the currently # recent 2.7.0.b1 it still doesn't work with working_dir(os.path.join(self.build_directory, 'generated')): # straight from the AutotoolsPackage class: options = [ '--prefix={0}'.format(prefix), '--enable-shared', '--with-cxx-optflags={0}'.format(self.optflags), ] if '+fortran' in spec: options += ['--enable-fortran'] configure = Executable('./configure') configure(*options) make() make('install') def patch(self): # Use Fortran compiler to link the Fortran example, not the C++ # compiler if '+fortran' in self.spec and self.spec.satisfies('%nvhpc'): filter_file('$(CXX) $(CXXFLAGS)', '$(FC) $(FCFLAGS)', 'export/fortran/Makefile', string=True)
37.44878
95
0.558421
import os from spack import * TUNE_VARIANTS = ( 'none', 'cp2k-lmax-4', 'cp2k-lmax-5', 'cp2k-lmax-6', 'cp2k-lmax-7', 'molgw-lmax-4', 'molgw-lmax-5', 'molgw-lmax-6', 'molgw-lmax-7', ) class Libint(AutotoolsPackage): homepage = "https://github.com/evaleev/libint" url = "https://github.com/evaleev/libint/archive/v2.1.0.tar.gz" version('2.6.0', sha256='4ae47e8f0b5632c3d2a956469a7920896708e9f0e396ec10071b8181e4c8d9fa') version('2.4.2', sha256='86dff38065e69a3a51d15cfdc638f766044cb87e5c6682d960c14f9847e2eac3') version('2.4.1', sha256='0513be124563fdbbc7cd3c7043e221df1bda236a037027ba9343429a27db8ce4') version('2.4.0', sha256='52eb16f065406099dcfaceb12f9a7f7e329c9cfcf6ed9bfacb0cff7431dd6019') version('2.2.0', sha256='f737d485f33ac819d7f28c6ce303b1f3a2296bfd2c14f7c1323f8c5d370bb0e3') version('2.1.0', sha256='43c453a1663aa1c55294df89ff9ece3aefc8d1bbba5ea31dbfe71b2d812e24c8') version('1.1.6', sha256='f201b0c621df678cfe8bdf3990796b8976ff194aba357ae398f2f29b0e2985a6') version('1.1.5', sha256='ec8cd4a4ba1e1a98230165210c293632372f0e573acd878ed62e5ec6f8b6174b') variant('fortran', default=False, description='Build & install Fortran bindings') variant('tune', default='none', multi=False, values=TUNE_VARIANTS, description='Tune libint for use with the given package') depends_on('autoconf@2.52:', type='build') depends_on('automake', type='build') depends_on('libtool', type='build') depends_on('boost', when='@2:') depends_on('gmp', when='@2:') for tvariant in TUNE_VARIANTS[1:]: conflicts('tune={0}'.format(tvariant), when='@:2.5.99', msg=('for versions prior to 2.6, tuning for specific' 'codes/configurations is not supported')) def url_for_version(self, version): base_url = "https://github.com/evaleev/libint/archive" if version == Version('1.0.0'): return "{0}/LIBINT_1_00.tar.gz".format(base_url) elif version < Version('2.1.0'): return "{0}/release-{1}.tar.gz".format(base_url, version.dashed) else: return "{0}/v{1}.tar.gz".format(base_url, version) def autoreconf(self, spec, prefix): libtoolize() aclocal('-I', 'lib/autoconf') autoconf() if '@2.6.0:' in spec: filter_file(r'^(export::.*)\s+tgz$', r'\1', 'export/Makefile') @property def optflags(self): flags = '-O2' if '%intel' in self.spec: flags += ' -xSSE4.2 -axAVX,CORE-AVX2 -ipo' return flags def setup_build_environment(self, env): env.set('CFLAGS', self.optflags) env.set('CXXFLAGS', self.optflags) if '%intel' in self.spec and which('xiar'): env.set('AR', 'xiar') def configure_args(self): config_args = ['--enable-shared'] optflags = self.optflags if self.version < Version('2.0.0'): config_args.extend([ '--with-cc-optflags={0}'.format(optflags), '--with-cxx-optflags={0}'.format(optflags) ]) else: config_args.extend([ '--with-cxx-optflags={0}'.format(optflags), '--with-cxxgen-optflags={0}'.format(optflags) ]) if self.version < Version('2.0.0'): config_args.extend([ '--with-libint-max-am=5', '--with-libderiv-max-am1=4' ]) if '@2.6.0:' in self.spec: config_args += ['--with-libint-exportdir=generated'] tune_value = self.spec.variants['tune'].value if tune_value.startswith('cp2k'): lmax = int(tune_value.split('-lmax-')[1]) config_args += [ '--enable-eri=1', '--enable-eri2=1', '--enable-eri3=1', '--with-max-am={0}'.format(lmax), '--with-eri-max-am={0},{1}'.format(lmax, lmax - 1), '--with-eri2-max-am={0},{1}'.format(lmax + 2, lmax + 1), '--with-eri3-max-am={0},{1}'.format(lmax + 2, lmax + 1), '--with-opt-am=3', '--enable-generic-code', '--disable-unrolling', ] if tune_value.startswith('molgw'): lmax = int(tune_value.split('-lmax-')[1]) config_args += [ '--enable-1body=1',
'--enable-eri=0', '--enable-eri2=0', '--enable-eri3=0', '--with-multipole-max-order=0', '--with-max-am={0}'.format(lmax), '--with-eri-max-am={0}'.format(lmax), '--with-eri2-max-am={0}'.format(lmax), '--with-eri3-max-am={0}'.format(lmax), '--with-opt-am=2', '--enable-contracted-ints', '--enable-generic-code', '--disable-unrolling', ] return config_args @property def build_targets(self): if '@2.6.0:' in self.spec: return ['export'] return [] @when('@2.6.0:') def install(self, spec, prefix): with working_dir(os.path.join(self.build_directory, 'generated')): # straight from the AutotoolsPackage class: options = [ '--prefix={0}'.format(prefix), '--enable-shared', '--with-cxx-optflags={0}'.format(self.optflags), ] if '+fortran' in spec: options += ['--enable-fortran'] configure = Executable('./configure') configure(*options) make() make('install') def patch(self): # Use Fortran compiler to link the Fortran example, not the C++ # compiler if '+fortran' in self.spec and self.spec.satisfies('%nvhpc'): filter_file('$(CXX) $(CXXFLAGS)', '$(FC) $(FCFLAGS)', 'export/fortran/Makefile', string=True)
true
true
f7248fd3c08c05e53ab12b8b1c1daa7aa98f5d00
11,277
py
Python
real_time.py
TheoPantaz/Control-of-robotic-vehicle-via-brain-activity
4cae5a69503659581f510c748f59f045d1f2b145
[ "MIT" ]
null
null
null
real_time.py
TheoPantaz/Control-of-robotic-vehicle-via-brain-activity
4cae5a69503659581f510c748f59f045d1f2b145
[ "MIT" ]
null
null
null
real_time.py
TheoPantaz/Control-of-robotic-vehicle-via-brain-activity
4cae5a69503659581f510c748f59f045d1f2b145
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Fri Jan 17 18:06:40 2020 @author: Kokkinos lines for telnet communication: 31,32,136,139,149,152,201,204,212,215,296 """ from threading import Thread import numpy as np import scipy.io as sio from pylsl import StreamInlet, resolve_stream from tkinter import * import telnetlib import pickle import threading from graphics import Graphics class rt(Graphics): def __init__(self, mode = 'IMvsall', tim_window = 4, vote_window = 4, overlap = 0, IM_window = 2, HOST = "192.168.4.1"): if mode == 'IMvsall' or 'Rvsall' or 'IMvsRest' or 'CSP_OVR' or'sync': self.mode = mode else: raise ValueError('Inappropriate mode value') # self.HOST = HOST # self.tn = telnetlib.Telnet(self.HOST) with open("visual_cues.txt") as f: content = f.readlines() content = [line.rstrip('\n') for line in content] self.Fs = int(content[0]) # Sampling Frequency self.Rdur = int(content[1]) # Rest visual cue duration self.Prdur = int(content[3]) content = np.array(content) self.vcN = len(content) # number of visual cues idxs = np.where(content == 'REST') self.RN = len(idxs[0]) # number of REST visual cues idxs = np.where(content == 'PREPARE') self.PRN = len(idxs[0]) self.IMN = len(content) - self.RN - self.PRN - 4 # number of Imaginary Movements visual cues try: self.IMdur = int(content[2]) self.recdur = self.RN * self.Rdur * self.Fs + self.IMN * self.IMdur * self.Fs + self.PRN * self.Prdur * self.Fs # duration of the recording except: IMdur = list(content[2].split(',')) self.IMdur = [int(i) for i in IMdur] self.IMdur = [np.random.randint(IMdur[0],IMdur[1]) for i in range(self.IMN)] self.recdur = self.RN * self.Rdur * self.Fs + sum(self.IMdur) * self.Fs + self.PRN * self.Prdur * self.Fs # duration of the recording self.content = np.delete(content,np.s_[:4]) if self.mode == 'sync': self.tim_window = self.IMdur * self.Fs self.vote_window = self.IMdur * self.Fs self.step = (self.IMdur + self.Prdur + self.Rdur) * self.Fs self.IM_window = 1 self.IMdur = [self.IMdur] * self.IMN else: self.tim_window = tim_window * self.Fs self.vote_window = vote_window * self.Fs self.overlap = overlap self.step = int(self.tim_window * (1 - self.overlap)) self.IM_window = IM_window Graphics.__init__(self) def load_bcis(self, filename): with open(filename, 'rb') as train: self.bcis = pickle.load(train) return self def begin_stream(self): print("looking for an EEG stream...") self.streams = resolve_stream('type', 'EEG') # create a new inlet to read from the stream self.inlet = StreamInlet(self.streams[0]) def pred_im(self, chunk, cSTR): self.pred = [] chunk = (np.array(chunk).T)/1000000000 chunk = chunk.reshape((1,chunk.shape[0],chunk.shape[1])) for i, bci in enumerate(self.bcis[:-1]): self.pred.append(bci.predict(chunk)) if self.pred[i] != 0: self.vote[i] += 1 else: self.vote[i] -= 1 self.pred.append(self.bcis[-1].predict(chunk)) if self.pred[-1] == 1: self.vote[-1] += 1 else: self.vote[-1] -= 1 if cSTR % self.vote_window == 0: self.pred_decision() def pred_decision(self): if self.mode == 'IMvsall' or self.mode == 'IMvsRest': if self.vote[0] <= 0 and self.vote[1] <= 0: self.prediction.extend([0]) print("pred:rest") elif self.vote[0] > 0: self.prediction.extend([1]) if self.begin: # self.tn.write(('1').encode('ascii')) self.begin = False else: # self.tn.write(('4').encode('ascii')) self.cIM += 1 print("pred:left") else: self.prediction.extend([2]) if self.begin: # self.tn.write(('1').encode('ascii')) self.begin = False else: # self.tn.write(('3').encode('ascii')) self.cIM += 1 print("pred:right") elif self.mode == 
'Rvsall': if self.vote[0] <= 0: self.prediction.extend([0]) print("pred:rest") elif self.vote[1] > 0: self.prediction.extend([1]) if self.begin: self.tn.write(('1').encode('ascii')) self.begin = False else: self.tn.write(('4').encode('ascii')) self.cIM += 1 print("pred:left") else: self.prediction.extend([2]) if self.begin: self.tn.write(('1').encode('ascii')) self.begin = False else: self.tn.write(('3').encode('ascii')) self.cIM += 1 print("pred:right") else: self.prediction.extend([self.pred[-1]]) if self.pred[-1] == 0: print("pred:rest") elif self.pred[-1] == 1: if self.begin: # self.tn.write(('1').encode('ascii')) self.begin = False else: # self.tn.write(('4').encode('ascii')) self.cIM += 1 print("pred:left") else: if self.begin: # self.tn.write(('1').encode('ascii')) self.begin = False else: # self.tn.write(('3').encode('ascii')) self.cIM += 1 print("pred:right") self.vote = [0] * len(self.vote) def main_loop(self): self.load_bcis('train') self.vote = [0,0,0] self.pred = [] self.prediction = [] self.begin = True self.cIM = 0 cSTR = 0 cVC = 0 cdur = 0 cIMdur = 0 dur = self.Prdur buffer = [] while cSTR < self.recdur: sample, timestamp = self.inlet.pull_sample() buffer += [sample,] if cdur % (dur * self.Fs) == 0: if self.content[cVC] == 'REST': print("REST") self.delete_all() cdur = 0 dur = self.Rdur cVC = cVC+1 elif self.content[cVC] == 'LEFT': print("LEFT") self.left_arrow() cdur = 0 try: dur = self.IMdur[cIMdur] cIMdur += 1 except: dur = self.IMdur cVC = cVC+1 elif self.content[cVC] == 'RIGHT': print("RIGHT") self.right_arrow() cdur = 0 try: dur = self.IMdur[cIMdur] cIMdur += 1 except: dur = self.IMdur cVC = cVC+1 elif self.content[cVC]=='PREPARE': self.Concentration_Cross() cdur = 0 dur = self.Prdur cVC = cVC+1 if cSTR > 0 and cSTR % self.step == 0: #and self.cIM == 0: t1 = threading.Thread(target = self.pred_im, args=(buffer[-self.tim_window:],cSTR,)) t1.start() # elif cSTR > 0 and cSTR % self.step == 0: # # if self.cIM == self.IM_window: # self.cIM = 0 # else: # self.cIM += 1 cSTR = cSTR + 1 cdur = cdur + 1 # self.tn.write(('0').encode('ascii')) return buffer def save_recording(self, buffer): LABELS = [] trig = [] offset = (self.Rdur + self.Prdur) * self.Fs trig += [offset,] idxs=np.where(self.content=='REST') self.content = np.delete(self.content,idxs) idxs=np.where(self.content=='PREPARE') self.content = np.delete(self.content,idxs) try: for i, IMdur in enumerate(self.IMdur): trig += [IMdur * self.Fs + offset + trig[-1],] LABELS += [0] * offset if self.content[i] == 'LEFT': LABELS += [1] * IMdur * self.Fs else: LABELS += [2] * IMdur * self.Fs except: for i, visual_cue in enumerate(self.content): trig += [self.IMdur * self.Fs + offset + trig[-1],] LABELS += [0] * offset if visual_cue == 'LEFT': LABELS += [1] * self.IMdur * self.Fs else: LABELS += [2] * self.IMdur * self.Fs LABELS += [0] * self.Rdur * self. Fs pred = [[pr] * self.vote_window for pr in self.prediction] trig = np.array(trig) trig = np.delete(trig,-1) LABELS = np.array(LABELS) buffer = np.array(buffer) pred = np.array(pred).flatten() # create matlab files sio.savemat('trig.mat', {'trig':trig}) sio.savemat('rec.mat', {'rec':buffer}) sio.savemat('LABELS.mat', {'LABELS':LABELS}) sio.savemat('pred.mat', {'pred':pred}) if __name__ == '__main__': b_c_i = rt(mode = 'CSP_OVR', tim_window = 4, vote_window = 8, overlap = 0.5,IM_window = 0) b_c_i.load_bcis('train') b_c_i.begin_stream() buffer = b_c_i.main_loop() b_c_i.save_recording(buffer)
31.412256
151
0.423162
from threading import Thread import numpy as np import scipy.io as sio from pylsl import StreamInlet, resolve_stream from tkinter import * import telnetlib import pickle import threading from graphics import Graphics class rt(Graphics): def __init__(self, mode = 'IMvsall', tim_window = 4, vote_window = 4, overlap = 0, IM_window = 2, HOST = "192.168.4.1"): if mode == 'IMvsall' or 'Rvsall' or 'IMvsRest' or 'CSP_OVR' or'sync': self.mode = mode else: raise ValueError('Inappropriate mode value') with open("visual_cues.txt") as f: content = f.readlines() content = [line.rstrip('\n') for line in content] self.Fs = int(content[0]) self.Rdur = int(content[1]) self.Prdur = int(content[3]) content = np.array(content) self.vcN = len(content) idxs = np.where(content == 'REST') self.RN = len(idxs[0]) idxs = np.where(content == 'PREPARE') self.PRN = len(idxs[0]) self.IMN = len(content) - self.RN - self.PRN - 4 try: self.IMdur = int(content[2]) self.recdur = self.RN * self.Rdur * self.Fs + self.IMN * self.IMdur * self.Fs + self.PRN * self.Prdur * self.Fs except: IMdur = list(content[2].split(',')) self.IMdur = [int(i) for i in IMdur] self.IMdur = [np.random.randint(IMdur[0],IMdur[1]) for i in range(self.IMN)] self.recdur = self.RN * self.Rdur * self.Fs + sum(self.IMdur) * self.Fs + self.PRN * self.Prdur * self.Fs self.content = np.delete(content,np.s_[:4]) if self.mode == 'sync': self.tim_window = self.IMdur * self.Fs self.vote_window = self.IMdur * self.Fs self.step = (self.IMdur + self.Prdur + self.Rdur) * self.Fs self.IM_window = 1 self.IMdur = [self.IMdur] * self.IMN else: self.tim_window = tim_window * self.Fs self.vote_window = vote_window * self.Fs self.overlap = overlap self.step = int(self.tim_window * (1 - self.overlap)) self.IM_window = IM_window Graphics.__init__(self) def load_bcis(self, filename): with open(filename, 'rb') as train: self.bcis = pickle.load(train) return self def begin_stream(self): print("looking for an EEG stream...") self.streams = resolve_stream('type', 'EEG') self.inlet = StreamInlet(self.streams[0]) def pred_im(self, chunk, cSTR): self.pred = [] chunk = (np.array(chunk).T)/1000000000 chunk = chunk.reshape((1,chunk.shape[0],chunk.shape[1])) for i, bci in enumerate(self.bcis[:-1]): self.pred.append(bci.predict(chunk)) if self.pred[i] != 0: self.vote[i] += 1 else: self.vote[i] -= 1 self.pred.append(self.bcis[-1].predict(chunk)) if self.pred[-1] == 1: self.vote[-1] += 1 else: self.vote[-1] -= 1 if cSTR % self.vote_window == 0: self.pred_decision() def pred_decision(self): if self.mode == 'IMvsall' or self.mode == 'IMvsRest': if self.vote[0] <= 0 and self.vote[1] <= 0: self.prediction.extend([0]) print("pred:rest") elif self.vote[0] > 0: self.prediction.extend([1]) if self.begin: self.begin = False else: self.cIM += 1 print("pred:left") else: self.prediction.extend([2]) if self.begin: self.begin = False else: self.cIM += 1 print("pred:right") elif self.mode == 'Rvsall': if self.vote[0] <= 0: self.prediction.extend([0]) print("pred:rest") elif self.vote[1] > 0: self.prediction.extend([1]) if self.begin: self.tn.write(('1').encode('ascii')) self.begin = False else: self.tn.write(('4').encode('ascii')) self.cIM += 1 print("pred:left") else: self.prediction.extend([2]) if self.begin: self.tn.write(('1').encode('ascii')) self.begin = False else: self.tn.write(('3').encode('ascii')) self.cIM += 1 print("pred:right") else: self.prediction.extend([self.pred[-1]]) if self.pred[-1] == 0: print("pred:rest") elif self.pred[-1] == 1: if self.begin: self.begin = False else: self.cIM += 1 
print("pred:left") else: if self.begin: self.begin = False else: self.cIM += 1 print("pred:right") self.vote = [0] * len(self.vote) def main_loop(self): self.load_bcis('train') self.vote = [0,0,0] self.pred = [] self.prediction = [] self.begin = True self.cIM = 0 cSTR = 0 cVC = 0 cdur = 0 cIMdur = 0 dur = self.Prdur buffer = [] while cSTR < self.recdur: sample, timestamp = self.inlet.pull_sample() buffer += [sample,] if cdur % (dur * self.Fs) == 0: if self.content[cVC] == 'REST': print("REST") self.delete_all() cdur = 0 dur = self.Rdur cVC = cVC+1 elif self.content[cVC] == 'LEFT': print("LEFT") self.left_arrow() cdur = 0 try: dur = self.IMdur[cIMdur] cIMdur += 1 except: dur = self.IMdur cVC = cVC+1 elif self.content[cVC] == 'RIGHT': print("RIGHT") self.right_arrow() cdur = 0 try: dur = self.IMdur[cIMdur] cIMdur += 1 except: dur = self.IMdur cVC = cVC+1 elif self.content[cVC]=='PREPARE': self.Concentration_Cross() cdur = 0 dur = self.Prdur cVC = cVC+1 if cSTR > 0 and cSTR % self.step == 0: t1 = threading.Thread(target = self.pred_im, args=(buffer[-self.tim_window:],cSTR,)) t1.start() cSTR = cSTR + 1 cdur = cdur + 1 return buffer def save_recording(self, buffer): LABELS = [] trig = [] offset = (self.Rdur + self.Prdur) * self.Fs trig += [offset,] idxs=np.where(self.content=='REST') self.content = np.delete(self.content,idxs) idxs=np.where(self.content=='PREPARE') self.content = np.delete(self.content,idxs) try: for i, IMdur in enumerate(self.IMdur): trig += [IMdur * self.Fs + offset + trig[-1],] LABELS += [0] * offset if self.content[i] == 'LEFT': LABELS += [1] * IMdur * self.Fs else: LABELS += [2] * IMdur * self.Fs except: for i, visual_cue in enumerate(self.content): trig += [self.IMdur * self.Fs + offset + trig[-1],] LABELS += [0] * offset if visual_cue == 'LEFT': LABELS += [1] * self.IMdur * self.Fs else: LABELS += [2] * self.IMdur * self.Fs LABELS += [0] * self.Rdur * self. Fs pred = [[pr] * self.vote_window for pr in self.prediction] trig = np.array(trig) trig = np.delete(trig,-1) LABELS = np.array(LABELS) buffer = np.array(buffer) pred = np.array(pred).flatten() sio.savemat('trig.mat', {'trig':trig}) sio.savemat('rec.mat', {'rec':buffer}) sio.savemat('LABELS.mat', {'LABELS':LABELS}) sio.savemat('pred.mat', {'pred':pred}) if __name__ == '__main__': b_c_i = rt(mode = 'CSP_OVR', tim_window = 4, vote_window = 8, overlap = 0.5,IM_window = 0) b_c_i.load_bcis('train') b_c_i.begin_stream() buffer = b_c_i.main_loop() b_c_i.save_recording(buffer)
true
true
f7249040b586c4a22845aeab9c8fe2659a7527ef
1,126
py
Python
python/6.net/3.Ext/1.shell_server.py
dunitian/BaseCode
4855ef4c6dd7c95d7239d2048832d8acfe26e084
[ "Apache-2.0" ]
25
2018-06-13T08:13:44.000Z
2020-11-19T14:02:11.000Z
python/6.net/3.Ext/1.shell_server.py
dunitian/BaseCode
4855ef4c6dd7c95d7239d2048832d8acfe26e084
[ "Apache-2.0" ]
null
null
null
python/6.net/3.Ext/1.shell_server.py
dunitian/BaseCode
4855ef4c6dd7c95d7239d2048832d8acfe26e084
[ "Apache-2.0" ]
13
2018-06-13T08:13:38.000Z
2022-01-06T06:45:07.000Z
from socket import socket def main(): with socket() as tcp_socket: tcp_socket.bind(('', 8080)) tcp_socket.listen() client_socket, client_addr = tcp_socket.accept() with client_socket: print(f"[肉鸡{client_addr}已经上线:]\n") while True: cmd = input("$ ") client_socket.send(cmd.encode("utf-8")) data = client_socket.recv(2048) if data: print(data.decode("utf-8")) if __name__ == "__main__": main() # from socketserver import ThreadingTCPServer, BaseRequestHandler # class MyHandler(BaseRequestHandler): # def handle(self): # print(f"[肉鸡{self.client_address}已经上线:]\n") # while True: # cmd = input("$ ") # self.request.send(cmd.encode("utf-8")) # data = self.request.recv(2048) # # if data: # print(data.decode("utf-8")) # if __name__ == "__main__": # ThreadingTCPServer.allow_reuse_address = True # with ThreadingTCPServer(('', 8080), MyHandler) as server: # server.serve_forever()
29.631579
65
0.562167
from socket import socket def main(): with socket() as tcp_socket: tcp_socket.bind(('', 8080)) tcp_socket.listen() client_socket, client_addr = tcp_socket.accept() with client_socket: print(f"[肉鸡{client_addr}已经上线:]\n") while True: cmd = input("$ ") client_socket.send(cmd.encode("utf-8")) data = client_socket.recv(2048) if data: print(data.decode("utf-8")) if __name__ == "__main__": main()
true
true
f724904db04d50387041ae9ec1500db44a5daf4d
17,114
py
Python
src/models/trainer_ext.py
Katarina11/PreSumm
616e72f038d512e9e9112af375d66a0b2e3db6cd
[ "MIT" ]
null
null
null
src/models/trainer_ext.py
Katarina11/PreSumm
616e72f038d512e9e9112af375d66a0b2e3db6cd
[ "MIT" ]
null
null
null
src/models/trainer_ext.py
Katarina11/PreSumm
616e72f038d512e9e9112af375d66a0b2e3db6cd
[ "MIT" ]
null
null
null
import os import numpy as np import torch from tensorboardX import SummaryWriter import distributed from models.reporter_ext import ReportMgr, Statistics from others.logging import logger from others.utils import test_rouge, rouge_results_to_str def _tally_parameters(model): n_params = sum([p.nelement() for p in model.parameters()]) return n_params def build_trainer(args, device_id, model, optim): """ Simplify `Trainer` creation based on user `opt`s* Args: opt (:obj:`Namespace`): user options (usually from argument parsing) model (:obj:`onmt.models.NMTModel`): the model to train fields (dict): dict of fields optim (:obj:`onmt.utils.Optimizer`): optimizer used during training data_type (str): string describing the type of data e.g. "text", "img", "audio" model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object used to save the model """ grad_accum_count = args.accum_count n_gpu = args.world_size if device_id >= 0: gpu_rank = int(args.gpu_ranks[device_id]) else: gpu_rank = 0 n_gpu = 0 print('gpu_rank %d' % gpu_rank) tensorboard_log_dir = args.model_path writer = SummaryWriter(tensorboard_log_dir, comment="Unmt") report_manager = ReportMgr(args.report_every, start_time=-1, tensorboard_writer=writer) trainer = Trainer(args, model, optim, grad_accum_count, n_gpu, gpu_rank, report_manager) # print(tr) if (model): n_params = _tally_parameters(model) logger.info('* number of parameters: %d' % n_params) return trainer class Trainer(object): """ Class that controls the training process. Args: model(:py:class:`onmt.models.model.NMTModel`): translation model to train train_loss(:obj:`onmt.utils.loss.LossComputeBase`): training loss computation valid_loss(:obj:`onmt.utils.loss.LossComputeBase`): training loss computation optim(:obj:`onmt.utils.optimizers.Optimizer`): the optimizer responsible for update trunc_size(int): length of truncated back propagation through time shard_size(int): compute loss in shards of this size for efficiency data_type(string): type of the source input: [text|img|audio] norm_method(string): normalization methods: [sents|tokens] grad_accum_count(int): accumulate gradients this many times. report_manager(:obj:`onmt.utils.ReportMgrBase`): the object that creates reports, or None model_saver(:obj:`onmt.models.ModelSaverBase`): the saver is used to save a checkpoint. Thus nothing will be saved if this parameter is None """ def __init__(self, args, model, optim, grad_accum_count=1, n_gpu=1, gpu_rank=1, report_manager=None): # Basic attributes. self.args = args self.save_checkpoint_steps = args.save_checkpoint_steps self.model = model self.optim = optim self.grad_accum_count = grad_accum_count self.n_gpu = n_gpu self.gpu_rank = gpu_rank self.report_manager = report_manager self.loss = torch.nn.BCELoss(reduction='none') assert grad_accum_count > 0 # Set model in training mode. if (model): self.model.train() def train(self, train_iter_fct, train_steps, valid_iter_fct=None, valid_steps=-1): """ The main training loops. by iterating over training data (i.e. `train_iter_fct`) and running validation (i.e. iterating over `valid_iter_fct` Args: train_iter_fct(function): a function that returns the train iterator. e.g. 
something like train_iter_fct = lambda: generator(*args, **kwargs) valid_iter_fct(function): same as train_iter_fct, for valid data train_steps(int): valid_steps(int): save_checkpoint_steps(int): Return: None """ logger.info('Start training...') # step = self.optim._step + 1 step = self.optim._step + 1 true_batchs = [] accum = 0 normalization = 0 train_iter = train_iter_fct() total_stats = Statistics() report_stats = Statistics() self._start_report_manager(start_time=total_stats.start_time) while step <= train_steps: reduce_counter = 0 for i, batch in enumerate(train_iter): if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank): true_batchs.append(batch) normalization += batch.batch_size accum += 1 if accum == self.grad_accum_count: reduce_counter += 1 if self.n_gpu > 1: normalization = sum(distributed .all_gather_list (normalization)) self._gradient_accumulation( true_batchs, normalization, total_stats, report_stats) report_stats = self._maybe_report_training( step, train_steps, self.optim.learning_rate, report_stats) true_batchs = [] accum = 0 normalization = 0 if (step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0): self._save(step) step += 1 if step > train_steps: break train_iter = train_iter_fct() return total_stats def validate(self, valid_iter, step=0): """ Validate model. valid_iter: validate data iterator Returns: :obj:`nmt.Statistics`: validation loss statistics """ # Set model in validating mode. self.model.eval() stats = Statistics() with torch.no_grad(): for batch in valid_iter: src = batch.src labels = batch.src_sent_labels segs = batch.segs clss = batch.clss mask = batch.mask_src mask_cls = batch.mask_cls sent_scores, mask = self.model(src, segs, clss, mask, mask_cls) loss = self.loss(sent_scores, labels.float()) loss = (loss * mask.float()).sum() batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels)) stats.update(batch_stats) self._report_step(0, step, valid_stats=stats) return stats def test(self, test_iter, step, cal_lead=False, cal_oracle=False): """ Validate model. valid_iter: validate data iterator Returns: :obj:`nmt.Statistics`: validation loss statistics """ # Set model in validating mode. 
def _get_ngrams(n, text): ngram_set = set() text_length = len(text) max_index_ngram_start = text_length - n for i in range(max_index_ngram_start + 1): ngram_set.add(tuple(text[i:i + n])) return ngram_set def _block_tri(c, p): tri_c = _get_ngrams(3, c.split()) for s in p: tri_s = _get_ngrams(3, s.split()) if len(tri_c.intersection(tri_s)) > 0: return True return False if (not cal_lead and not cal_oracle): self.model.eval() stats = Statistics() can_path = '%s_step%d.candidate' % (self.args.result_path, step) gold_path = '%s_step%d.gold' % (self.args.result_path, step) ## src_path = '%s_step%d.src' % (self.args.result_path, step) f = open(src_path, 'w') ## sent_no = 0 with open(can_path, 'w') as save_pred: with open(gold_path, 'w') as save_gold: with torch.no_grad(): for batch in test_iter: src = batch.src labels = batch.src_sent_labels segs = batch.segs clss = batch.clss mask = batch.mask_src mask_cls = batch.mask_cls gold = [] pred = [] src_fix = [] if (cal_lead): selected_ids = [list(range(batch.clss.size(1)))] * batch.batch_size elif (cal_oracle): selected_ids = [[j for j in range(batch.clss.size(1)) if labels[i][j] == 1] for i in range(batch.batch_size)] else: sent_scores, mask = self.model(src, segs, clss, mask, mask_cls) if labels.float().size()[1] != 0: loss = self.loss(sent_scores, labels.float()) else: continue loss = (loss * mask.float()).sum() batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels)) stats.update(batch_stats) sent_scores = sent_scores + mask.float() sent_scores = sent_scores.cpu().data.numpy() selected_ids = np.argsort(-sent_scores, 1) if len(selected_ids[0]) < 7: continue # selected_ids = np.sort(selected_ids,1) for i, idx in enumerate(selected_ids): _pred = [] if (len(batch.src_str[i]) == 0): continue for j in selected_ids[i][:len(batch.src_str[i])]: if (j >= len(batch.src_str[i])): continue candidate = batch.src_str[i][j].strip() if (self.args.block_trigram): if (not _block_tri(candidate, _pred)): _pred.append(candidate) else: _pred.append(candidate) if ((not cal_oracle) and (not self.args.recall_eval) and len(_pred) == 3): break _pred = '<q>'.join(_pred) if (self.args.recall_eval): _pred = ' '.join(_pred.split()[:len(batch.tgt_str[i].split())]) pred.append(_pred) gold.append(batch.tgt_str[i]) src_fix.append(batch.src_str[i]) sent_no += 1 # print(sent_no) # print('gold', gold) # print(gold_path) for i in range(len(gold)): save_gold.write(str(sent_no) + "_" + str(i) + ': ' + gold[i].strip() + '\n') for i in range(len(pred)): save_pred.write(str(sent_no) + "_" + str(i) + ': ' + pred[i].strip() + '\n') for i in range(len(pred)): f.write(str(sent_no) + "_" + str(i) + ': ' + '###'.join(src_fix[i]).strip()+'\n') f.close() if (step != -1 and self.args.report_rouge): rouges = test_rouge(self.args.temp_dir, can_path, gold_path) logger.info('Rouges at step %d \n%s' % (step, rouge_results_to_str(rouges))) self._report_step(0, step, valid_stats=stats) return stats def _gradient_accumulation(self, true_batchs, normalization, total_stats, report_stats): if self.grad_accum_count > 1: self.model.zero_grad() for batch in true_batchs: if self.grad_accum_count == 1: self.model.zero_grad() src = batch.src labels = batch.src_sent_labels segs = batch.segs clss = batch.clss mask = batch.mask_src mask_cls = batch.mask_cls sent_scores, mask = self.model(src, segs, clss, mask, mask_cls) loss = self.loss(sent_scores, labels.float()) loss = (loss * mask.float()).sum() (loss / loss.numel()).backward() # loss.div(float(normalization)).backward() batch_stats = 
Statistics(float(loss.cpu().data.numpy()), normalization) total_stats.update(batch_stats) report_stats.update(batch_stats) # 4. Update the parameters and statistics. if self.grad_accum_count == 1: # Multi GPU gradient gather if self.n_gpu > 1: grads = [p.grad.data for p in self.model.parameters() if p.requires_grad and p.grad is not None] distributed.all_reduce_and_rescale_tensors( grads, float(1)) self.optim.step() # in case of multi step gradient accumulation, # update only after accum batches if self.grad_accum_count > 1: if self.n_gpu > 1: grads = [p.grad.data for p in self.model.parameters() if p.requires_grad and p.grad is not None] distributed.all_reduce_and_rescale_tensors( grads, float(1)) self.optim.step() def _save(self, step): real_model = self.model # real_generator = (self.generator.module # if isinstance(self.generator, torch.nn.DataParallel) # else self.generator) model_state_dict = real_model.state_dict() # generator_state_dict = real_generator.state_dict() checkpoint = { 'model': model_state_dict, # 'generator': generator_state_dict, 'opt': self.args, 'optims': self.optim, } checkpoint_path = os.path.join(self.args.model_path, 'model_step_%d.pt' % step) logger.info("Saving checkpoint %s" % checkpoint_path) # checkpoint_path = '%s_step_%d.pt' % (FLAGS.model_path, step) if (not os.path.exists(checkpoint_path)): torch.save(checkpoint, checkpoint_path) return checkpoint, checkpoint_path def _start_report_manager(self, start_time=None): """ Simple function to start report manager (if any) """ if self.report_manager is not None: if start_time is None: self.report_manager.start() else: self.report_manager.start_time = start_time def _maybe_gather_stats(self, stat): """ Gather statistics in multi-processes cases Args: stat(:obj:onmt.utils.Statistics): a Statistics object to gather or None (it returns None in this case) Returns: stat: the updated (or unchanged) stat object """ if stat is not None and self.n_gpu > 1: return Statistics.all_gather_stats(stat) return stat def _maybe_report_training(self, step, num_steps, learning_rate, report_stats): """ Simple function to report training stats (if report_manager is set) see `onmt.utils.ReportManagerBase.report_training` for doc """ if self.report_manager is not None: return self.report_manager.report_training( step, num_steps, learning_rate, report_stats, multigpu=self.n_gpu > 1) def _report_step(self, learning_rate, step, train_stats=None, valid_stats=None): """ Simple function to report stats (if report_manager is set) see `onmt.utils.ReportManagerBase.report_step` for doc """ if self.report_manager is not None: return self.report_manager.report_step( learning_rate, step, train_stats=train_stats, valid_stats=valid_stats) def _maybe_save(self, step): """ Save the model if a model saver is set """ if self.model_saver is not None: self.model_saver.maybe_save(step)
38.895455
112
0.521678
import os import numpy as np import torch from tensorboardX import SummaryWriter import distributed from models.reporter_ext import ReportMgr, Statistics from others.logging import logger from others.utils import test_rouge, rouge_results_to_str def _tally_parameters(model): n_params = sum([p.nelement() for p in model.parameters()]) return n_params def build_trainer(args, device_id, model, optim): grad_accum_count = args.accum_count n_gpu = args.world_size if device_id >= 0: gpu_rank = int(args.gpu_ranks[device_id]) else: gpu_rank = 0 n_gpu = 0 print('gpu_rank %d' % gpu_rank) tensorboard_log_dir = args.model_path writer = SummaryWriter(tensorboard_log_dir, comment="Unmt") report_manager = ReportMgr(args.report_every, start_time=-1, tensorboard_writer=writer) trainer = Trainer(args, model, optim, grad_accum_count, n_gpu, gpu_rank, report_manager) if (model): n_params = _tally_parameters(model) logger.info('* number of parameters: %d' % n_params) return trainer class Trainer(object): def __init__(self, args, model, optim, grad_accum_count=1, n_gpu=1, gpu_rank=1, report_manager=None): self.args = args self.save_checkpoint_steps = args.save_checkpoint_steps self.model = model self.optim = optim self.grad_accum_count = grad_accum_count self.n_gpu = n_gpu self.gpu_rank = gpu_rank self.report_manager = report_manager self.loss = torch.nn.BCELoss(reduction='none') assert grad_accum_count > 0 if (model): self.model.train() def train(self, train_iter_fct, train_steps, valid_iter_fct=None, valid_steps=-1): logger.info('Start training...') step = self.optim._step + 1 true_batchs = [] accum = 0 normalization = 0 train_iter = train_iter_fct() total_stats = Statistics() report_stats = Statistics() self._start_report_manager(start_time=total_stats.start_time) while step <= train_steps: reduce_counter = 0 for i, batch in enumerate(train_iter): if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank): true_batchs.append(batch) normalization += batch.batch_size accum += 1 if accum == self.grad_accum_count: reduce_counter += 1 if self.n_gpu > 1: normalization = sum(distributed .all_gather_list (normalization)) self._gradient_accumulation( true_batchs, normalization, total_stats, report_stats) report_stats = self._maybe_report_training( step, train_steps, self.optim.learning_rate, report_stats) true_batchs = [] accum = 0 normalization = 0 if (step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0): self._save(step) step += 1 if step > train_steps: break train_iter = train_iter_fct() return total_stats def validate(self, valid_iter, step=0): self.model.eval() stats = Statistics() with torch.no_grad(): for batch in valid_iter: src = batch.src labels = batch.src_sent_labels segs = batch.segs clss = batch.clss mask = batch.mask_src mask_cls = batch.mask_cls sent_scores, mask = self.model(src, segs, clss, mask, mask_cls) loss = self.loss(sent_scores, labels.float()) loss = (loss * mask.float()).sum() batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels)) stats.update(batch_stats) self._report_step(0, step, valid_stats=stats) return stats def test(self, test_iter, step, cal_lead=False, cal_oracle=False): def _get_ngrams(n, text): ngram_set = set() text_length = len(text) max_index_ngram_start = text_length - n for i in range(max_index_ngram_start + 1): ngram_set.add(tuple(text[i:i + n])) return ngram_set def _block_tri(c, p): tri_c = _get_ngrams(3, c.split()) for s in p: tri_s = _get_ngrams(3, s.split()) if len(tri_c.intersection(tri_s)) > 0: return True return False if (not cal_lead and not 
cal_oracle): self.model.eval() stats = Statistics() can_path = '%s_step%d.candidate' % (self.args.result_path, step) gold_path = '%s_step%d.gold' % (self.args.result_path, step) src_path = '%s_step%d.src' % (self.args.result_path, step) f = open(src_path, 'w') sent_no = 0 with open(can_path, 'w') as save_pred: with open(gold_path, 'w') as save_gold: with torch.no_grad(): for batch in test_iter: src = batch.src labels = batch.src_sent_labels segs = batch.segs clss = batch.clss mask = batch.mask_src mask_cls = batch.mask_cls gold = [] pred = [] src_fix = [] if (cal_lead): selected_ids = [list(range(batch.clss.size(1)))] * batch.batch_size elif (cal_oracle): selected_ids = [[j for j in range(batch.clss.size(1)) if labels[i][j] == 1] for i in range(batch.batch_size)] else: sent_scores, mask = self.model(src, segs, clss, mask, mask_cls) if labels.float().size()[1] != 0: loss = self.loss(sent_scores, labels.float()) else: continue loss = (loss * mask.float()).sum() batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels)) stats.update(batch_stats) sent_scores = sent_scores + mask.float() sent_scores = sent_scores.cpu().data.numpy() selected_ids = np.argsort(-sent_scores, 1) if len(selected_ids[0]) < 7: continue for i, idx in enumerate(selected_ids): _pred = [] if (len(batch.src_str[i]) == 0): continue for j in selected_ids[i][:len(batch.src_str[i])]: if (j >= len(batch.src_str[i])): continue candidate = batch.src_str[i][j].strip() if (self.args.block_trigram): if (not _block_tri(candidate, _pred)): _pred.append(candidate) else: _pred.append(candidate) if ((not cal_oracle) and (not self.args.recall_eval) and len(_pred) == 3): break _pred = '<q>'.join(_pred) if (self.args.recall_eval): _pred = ' '.join(_pred.split()[:len(batch.tgt_str[i].split())]) pred.append(_pred) gold.append(batch.tgt_str[i]) src_fix.append(batch.src_str[i]) sent_no += 1 for i in range(len(gold)): save_gold.write(str(sent_no) + "_" + str(i) + ': ' + gold[i].strip() + '\n') for i in range(len(pred)): save_pred.write(str(sent_no) + "_" + str(i) + ': ' + pred[i].strip() + '\n') for i in range(len(pred)): f.write(str(sent_no) + "_" + str(i) + ': ' + '###'.join(src_fix[i]).strip()+'\n') f.close() if (step != -1 and self.args.report_rouge): rouges = test_rouge(self.args.temp_dir, can_path, gold_path) logger.info('Rouges at step %d \n%s' % (step, rouge_results_to_str(rouges))) self._report_step(0, step, valid_stats=stats) return stats def _gradient_accumulation(self, true_batchs, normalization, total_stats, report_stats): if self.grad_accum_count > 1: self.model.zero_grad() for batch in true_batchs: if self.grad_accum_count == 1: self.model.zero_grad() src = batch.src labels = batch.src_sent_labels segs = batch.segs clss = batch.clss mask = batch.mask_src mask_cls = batch.mask_cls sent_scores, mask = self.model(src, segs, clss, mask, mask_cls) loss = self.loss(sent_scores, labels.float()) loss = (loss * mask.float()).sum() (loss / loss.numel()).backward() batch_stats = Statistics(float(loss.cpu().data.numpy()), normalization) total_stats.update(batch_stats) report_stats.update(batch_stats) if self.grad_accum_count == 1: if self.n_gpu > 1: grads = [p.grad.data for p in self.model.parameters() if p.requires_grad and p.grad is not None] distributed.all_reduce_and_rescale_tensors( grads, float(1)) self.optim.step() if self.grad_accum_count > 1: if self.n_gpu > 1: grads = [p.grad.data for p in self.model.parameters() if p.requires_grad and p.grad is not None] distributed.all_reduce_and_rescale_tensors( grads, float(1)) 
self.optim.step() def _save(self, step): real_model = self.model model_state_dict = real_model.state_dict() checkpoint = { 'model': model_state_dict, 'opt': self.args, 'optims': self.optim, } checkpoint_path = os.path.join(self.args.model_path, 'model_step_%d.pt' % step) logger.info("Saving checkpoint %s" % checkpoint_path) if (not os.path.exists(checkpoint_path)): torch.save(checkpoint, checkpoint_path) return checkpoint, checkpoint_path def _start_report_manager(self, start_time=None): if self.report_manager is not None: if start_time is None: self.report_manager.start() else: self.report_manager.start_time = start_time def _maybe_gather_stats(self, stat): if stat is not None and self.n_gpu > 1: return Statistics.all_gather_stats(stat) return stat def _maybe_report_training(self, step, num_steps, learning_rate, report_stats): if self.report_manager is not None: return self.report_manager.report_training( step, num_steps, learning_rate, report_stats, multigpu=self.n_gpu > 1) def _report_step(self, learning_rate, step, train_stats=None, valid_stats=None): if self.report_manager is not None: return self.report_manager.report_step( learning_rate, step, train_stats=train_stats, valid_stats=valid_stats) def _maybe_save(self, step): if self.model_saver is not None: self.model_saver.maybe_save(step)
true
true
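Trainer.test() in the record above ranks sentences by their predicted scores and applies trigram blocking: a candidate sentence is skipped if it shares any trigram with the sentences already selected, and selection stops after three sentences. The standalone sketch below mirrors that _get_ngrams/_block_tri logic outside the Trainer class; the toy sentences and scores are invented for illustration.

# Standalone sketch of the trigram-blocking selection used in Trainer.test().
def get_ngrams(n, tokens):
    # All n-grams of a token list, as a set of tuples.
    return {tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)}


def blocked(candidate, selected):
    # True if the candidate repeats any trigram already in the summary.
    cand_tri = get_ngrams(3, candidate.split())
    return any(cand_tri & get_ngrams(3, s.split()) for s in selected)


def select_summary(sentences, scores, max_sents=3):
    # Rank sentences by descending score and keep the top ones that do not
    # repeat a trigram already present in the running summary.
    order = sorted(range(len(sentences)), key=lambda i: -scores[i])
    picked = []
    for i in order:
        if not blocked(sentences[i], picked):
            picked.append(sentences[i])
        if len(picked) == max_sents:
            break
    return '<q>'.join(picked)


sentences = [
    "the cat sat on the mat",
    "the cat sat on the sofa",      # shares "the cat sat" with the first -> blocked
    "a dog barked at the mailman",
    "rain is expected tomorrow",
]
scores = [0.9, 0.8, 0.7, 0.6]
print(select_summary(sentences, scores))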
f724910c59315867a42a56fab3deb36f5d3adb7a
46,062
py
Python
tensorflow/contrib/bayesflow/python/ops/hmc_impl.py
M155K4R4/Tensorflow
e5e03ef3148303b3dfed89a1492dedf92b45be25
[ "Apache-2.0" ]
5
2019-05-23T02:59:21.000Z
2020-02-05T08:20:23.000Z
tensorflow/contrib/bayesflow/python/ops/hmc_impl.py
M155K4R4/Tensorflow
e5e03ef3148303b3dfed89a1492dedf92b45be25
[ "Apache-2.0" ]
2
2017-08-01T21:11:06.000Z
2017-08-01T23:07:02.000Z
tensorflow/contrib/bayesflow/python/ops/hmc_impl.py
M155K4R4/Tensorflow
e5e03ef3148303b3dfed89a1492dedf92b45be25
[ "Apache-2.0" ]
2
2019-09-05T06:43:24.000Z
2019-09-07T07:58:34.000Z
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Hamiltonian Monte Carlo, a gradient-based MCMC algorithm. @@sample_chain @@sample_annealed_importance_chain @@kernel """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import functional_ops from tensorflow.python.ops import gradients_impl as gradients_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops.distributions import util as distributions_util __all__ = [ "sample_chain", "sample_annealed_importance_chain", "kernel", ] KernelResults = collections.namedtuple( "KernelResults", [ "acceptance_probs", "current_grads_target_log_prob", # "Current result" means "accepted". "current_target_log_prob", # "Current result" means "accepted". "energy_change", "is_accepted", "proposed_grads_target_log_prob", "proposed_state", "proposed_target_log_prob", "random_positive", ]) def _make_dummy_kernel_results( dummy_state, dummy_target_log_prob, dummy_grads_target_log_prob): return KernelResults( acceptance_probs=dummy_target_log_prob, current_grads_target_log_prob=dummy_grads_target_log_prob, current_target_log_prob=dummy_target_log_prob, energy_change=dummy_target_log_prob, is_accepted=array_ops.ones_like(dummy_target_log_prob, dtypes.bool), proposed_grads_target_log_prob=dummy_grads_target_log_prob, proposed_state=dummy_state, proposed_target_log_prob=dummy_target_log_prob, random_positive=dummy_target_log_prob, ) def sample_chain( num_results, target_log_prob_fn, current_state, step_size, num_leapfrog_steps, num_burnin_steps=0, num_steps_between_results=0, seed=None, current_target_log_prob=None, current_grads_target_log_prob=None, name=None): """Runs multiple iterations of one or more Hamiltonian Monte Carlo chains. Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC) algorithm that takes a series of gradient-informed steps to produce a Metropolis proposal. This function samples from an HMC Markov chain at `current_state` and whose stationary distribution has log-unnormalized-density `target_log_prob_fn()`. This function samples from multiple chains in parallel. It assumes that the the leftmost dimensions of (each) `current_state` (part) index an independent chain. The function `target_log_prob_fn()` sums log-probabilities across event dimensions (i.e., current state (part) rightmost dimensions). Each element of the output of `target_log_prob_fn()` represents the (possibly unnormalized) log-probability of the joint distribution over (all) the current state (parts). 
The `current_state` can be represented as a single `Tensor` or a `list` of `Tensors` which collectively represent the current state. When specifying a `list`, one must also specify a list of `step_size`s. Note: `target_log_prob_fn` is called exactly twice. Only one out of every `num_steps_between_samples + 1` steps is included in the returned results. This "thinning" comes at a cost of reduced statistical power, while reducing memory requirements and autocorrelation. For more discussion see [1]. [1]: "Statistically efficient thinning of a Markov chain sampler." Art B. Owen. April 2017. http://statweb.stanford.edu/~owen/reports/bestthinning.pdf #### Examples: ##### Sample from a diagonal-variance Gaussian. ```python tfd = tf.contrib.distributions def make_likelihood(true_variances): return tfd.MultivariateNormalDiag( scale_diag=tf.sqrt(true_variances)) dims = 10 dtype = np.float32 true_variances = tf.linspace(dtype(1), dtype(3), dims) likelihood = make_likelihood(true_variances) states, kernel_results = hmc.sample_chain( num_results=1000, target_log_prob_fn=likelihood.log_prob, current_state=tf.zeros(dims), step_size=0.5, num_leapfrog_steps=2, num_burnin_steps=500) # Compute sample stats. sample_mean = tf.reduce_mean(states, axis=0) sample_var = tf.reduce_mean( tf.squared_difference(states, sample_mean), axis=0) ``` ##### Sampling from factor-analysis posteriors with known factors. I.e., ```none for i=1..n: w[i] ~ Normal(0, eye(d)) # prior x[i] ~ Normal(loc=matmul(w[i], F)) # likelihood ``` where `F` denotes factors. ```python tfd = tf.contrib.distributions def make_prior(dims, dtype): return tfd.MultivariateNormalDiag( loc=tf.zeros(dims, dtype)) def make_likelihood(weights, factors): return tfd.MultivariateNormalDiag( loc=tf.tensordot(weights, factors, axes=[[0], [-1]])) # Setup data. num_weights = 10 num_factors = 4 num_chains = 100 dtype = np.float32 prior = make_prior(num_weights, dtype) weights = prior.sample(num_chains) factors = np.random.randn(num_factors, num_weights).astype(dtype) x = make_likelihood(weights, factors).sample(num_chains) def target_log_prob(w): # Target joint is: `f(w) = p(w, x | factors)`. return prior.log_prob(w) + make_likelihood(w, factors).log_prob(x) # Get `num_results` samples from `num_chains` independent chains. chains_states, kernels_results = hmc.sample_chain( num_results=1000, target_log_prob_fn=target_log_prob, current_state=tf.zeros([num_chains, dims], dtype), step_size=0.1, num_leapfrog_steps=2, num_burnin_steps=500) # Compute sample stats. sample_mean = tf.reduce_mean(chains_states, axis=[0, 1]) sample_var = tf.reduce_mean( tf.squared_difference(chains_states, sample_mean), axis=[0, 1]) ``` Args: num_results: Integer number of Markov chain draws. target_log_prob_fn: Python callable which takes an argument like `current_state` (or `*current_state` if it's a list) and returns its (possibly unnormalized) log-density under the target distribution. current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). The first `r` dimensions index independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`. step_size: `Tensor` or Python `list` of `Tensor`s representing the step size for the leapfrog integrator. Must broadcast with the shape of `current_state`. Larger step sizes lead to faster progress, but too-large step sizes make rejection exponentially more likely. 
When possible, it's often helpful to match per-variable step sizes to the standard deviations of the target distribution in each variable. num_leapfrog_steps: Integer number of steps to run the leapfrog integrator for. Total progress per HMC step is roughly proportional to `step_size * num_leapfrog_steps`. num_burnin_steps: Integer number of chain steps to take before starting to collect results. Default value: 0 (i.e., no burn-in). num_steps_between_results: Integer number of chain steps between collecting a result. Only one out of every `num_steps_between_samples + 1` steps is included in the returned results. This "thinning" comes at a cost of reduced statistical power, while reducing memory requirements and autocorrelation. For more discussion see [1]. Default value: 0 (i.e., no subsampling). seed: Python integer to seed the random number generator. current_target_log_prob: (Optional) `Tensor` representing the value of `target_log_prob_fn` at the `current_state`. The only reason to specify this argument is to reduce TF graph size. Default value: `None` (i.e., compute as needed). current_grads_target_log_prob: (Optional) Python list of `Tensor`s representing gradient of `target_log_prob` at the `current_state` and wrt the `current_state`. Must have same shape as `current_state`. The only reason to specify this argument is to reduce TF graph size. Default value: `None` (i.e., compute as needed). name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., "hmc_sample_chain"). Returns: accepted_states: Tensor or Python list of `Tensor`s representing the state(s) of the Markov chain(s) at each result step. Has same shape as input `current_state` but with a prepended `num_results`-size dimension. kernel_results: `collections.namedtuple` of internal calculations used to advance the chain. """ with ops.name_scope( name, "hmc_sample_chain", [num_results, current_state, step_size, num_leapfrog_steps, num_burnin_steps, num_steps_between_results, seed, current_target_log_prob, current_grads_target_log_prob]): with ops.name_scope("initialize"): [ current_state, step_size, current_target_log_prob, current_grads_target_log_prob, ] = _prepare_args( target_log_prob_fn, current_state, step_size, current_target_log_prob, current_grads_target_log_prob) num_results = ops.convert_to_tensor( num_results, dtype=dtypes.int32, name="num_results") num_leapfrog_steps = ops.convert_to_tensor( num_leapfrog_steps, dtype=dtypes.int32, name="num_leapfrog_steps") num_burnin_steps = ops.convert_to_tensor( num_burnin_steps, dtype=dtypes.int32, name="num_burnin_steps") num_steps_between_results = ops.convert_to_tensor( num_steps_between_results, dtype=dtypes.int32, name="num_steps_between_results") def _run_chain(num_steps, current_state, kernel_results): """Runs the chain(s) for `num_steps`.""" def _loop_body(iter_, current_state, kernel_results): return [iter_ + 1] + list(kernel( target_log_prob_fn, current_state, step_size, num_leapfrog_steps, seed, kernel_results.current_target_log_prob, kernel_results.current_grads_target_log_prob)) while_loop_kwargs = dict( cond=lambda iter_, *args: iter_ < num_steps, body=_loop_body, loop_vars=[ np.int32(0), current_state, kernel_results, ], ) if seed is not None: while_loop_kwargs["parallel_iterations"] = 1 return control_flow_ops.while_loop( **while_loop_kwargs)[1:] # Lop-off "iter_". 
def _scan_body(args_list, iter_): """Closure which implements `tf.scan` body.""" current_state, kernel_results = args_list return _run_chain( 1 + array_ops.where(math_ops.equal(iter_, 0), num_burnin_steps, num_steps_between_results), current_state, kernel_results) scan_kwargs = dict( fn=_scan_body, elems=math_ops.range(num_results), # iter_: used to choose burnin. initializer=[ current_state, _make_dummy_kernel_results( current_state, current_target_log_prob, current_grads_target_log_prob), ]) if seed is not None: scan_kwargs["parallel_iterations"] = 1 return functional_ops.scan(**scan_kwargs) def sample_annealed_importance_chain( proposal_log_prob_fn, num_steps, target_log_prob_fn, current_state, step_size, num_leapfrog_steps, seed=None, name=None): """Runs annealed importance sampling (AIS) to estimate normalizing constants. This function uses Hamiltonian Monte Carlo to sample from a series of distributions that slowly interpolates between an initial "proposal" distribution: `exp(proposal_log_prob_fn(x) - proposal_log_normalizer)` and the target distribution: `exp(target_log_prob_fn(x) - target_log_normalizer)`, accumulating importance weights along the way. The product of these importance weights gives an unbiased estimate of the ratio of the normalizing constants of the initial distribution and the target distribution: `E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`. Note: `proposal_log_prob_fn` and `target_log_prob_fn` are called exactly three times (although this may be reduced to two times, in the future). #### Examples: ##### Estimate the normalizing constant of a log-gamma distribution. ```python tfd = tf.contrib.distributions # Run 100 AIS chains in parallel num_chains = 100 dims = 20 dtype = np.float32 proposal = tfd.MultivatiateNormalDiag( loc=tf.zeros([dims], dtype=dtype)) target = tfd.TransformedDistribution( distribution=tfd.Gamma(concentration=dtype(2), rate=dtype(3)), bijector=tfd.bijectors.Invert(tfd.bijectors.Exp()), event_shape=[dims]) chains_state, ais_weights, kernels_results = ( hmc.sample_annealed_importance_chain( proposal_log_prob_fn=proposal.log_prob, num_steps=1000, target_log_prob_fn=target.log_prob, step_size=0.2, current_state=proposal.sample(num_chains), num_leapfrog_steps=2)) log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights) - np.log(num_chains)) log_true_normalizer = tf.lgamma(2.) - 2. * tf.log(3.) ``` ##### Estimate marginal likelihood of a Bayesian regression model. ```python tfd = tf.contrib.distributions def make_prior(dims, dtype): return tfd.MultivariateNormalDiag( loc=tf.zeros(dims, dtype)) def make_likelihood(weights, x): return tfd.MultivariateNormalDiag( loc=tf.tensordot(weights, x, axes=[[0], [-1]])) # Run 100 AIS chains in parallel num_chains = 100 dims = 10 dtype = np.float32 # Make training data. x = np.random.randn(num_chains, dims).astype(dtype) true_weights = np.random.randn(dims).astype(dtype) y = np.dot(x, true_weights) + np.random.randn(num_chains) # Setup model. 
prior = make_prior(dims, dtype) def target_log_prob_fn(weights): return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y) proposal = tfd.MultivariateNormalDiag( loc=tf.zeros(dims, dtype)) weight_samples, ais_weights, kernel_results = ( hmc.sample_annealed_importance_chain( num_steps=1000, proposal_log_prob_fn=proposal.log_prob, target_log_prob_fn=target_log_prob_fn current_state=tf.zeros([num_chains, dims], dtype), step_size=0.1, num_leapfrog_steps=2)) log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights) - np.log(num_chains)) ``` Args: proposal_log_prob_fn: Python callable that returns the log density of the initial distribution. num_steps: Integer number of Markov chain updates to run. More iterations means more expense, but smoother annealing between q and p, which in turn means exponentially lower variance for the normalizing constant estimator. target_log_prob_fn: Python callable which takes an argument like `current_state` (or `*current_state` if it's a list) and returns its (possibly unnormalized) log-density under the target distribution. current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). The first `r` dimensions index independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`. step_size: `Tensor` or Python `list` of `Tensor`s representing the step size for the leapfrog integrator. Must broadcast with the shape of `current_state`. Larger step sizes lead to faster progress, but too-large step sizes make rejection exponentially more likely. When possible, it's often helpful to match per-variable step sizes to the standard deviations of the target distribution in each variable. num_leapfrog_steps: Integer number of steps to run the leapfrog integrator for. Total progress per HMC step is roughly proportional to `step_size * num_leapfrog_steps`. seed: Python integer to seed the random number generator. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., "hmc_sample_annealed_importance_chain"). Returns: accepted_state: `Tensor` or Python list of `Tensor`s representing the state(s) of the Markov chain(s) at the final iteration. Has same shape as input `current_state`. ais_weights: Tensor with the estimated weight(s). Has shape matching `target_log_prob_fn(current_state)`. kernel_results: `collections.namedtuple` of internal calculations used to advance the chain. """ def make_convex_combined_log_prob_fn(iter_): def _fn(*args): p = proposal_log_prob_fn(*args) t = target_log_prob_fn(*args) dtype = p.dtype.base_dtype beta = (math_ops.cast(iter_ + 1, dtype) / math_ops.cast(num_steps, dtype)) return (1. 
- beta) * p + beta * t return _fn with ops.name_scope( name, "hmc_sample_annealed_importance_chain", [num_steps, current_state, step_size, num_leapfrog_steps, seed]): with ops.name_scope("initialize"): [ current_state, step_size, current_log_prob, current_grads_log_prob, ] = _prepare_args( make_convex_combined_log_prob_fn(iter_=0), current_state, step_size, description="convex_combined_log_prob") num_steps = ops.convert_to_tensor( num_steps, dtype=dtypes.int32, name="num_steps") num_leapfrog_steps = ops.convert_to_tensor( num_leapfrog_steps, dtype=dtypes.int32, name="num_leapfrog_steps") def _loop_body(iter_, ais_weights, current_state, kernel_results): """Closure which implements `tf.while_loop` body.""" current_state_parts = (list(current_state) if _is_list_like(current_state) else [current_state]) # TODO(b/72994218): Consider refactoring things to avoid this unecessary # call. ais_weights += ((target_log_prob_fn(*current_state_parts) - proposal_log_prob_fn(*current_state_parts)) / math_ops.cast(num_steps, ais_weights.dtype)) return [iter_ + 1, ais_weights] + list(kernel( make_convex_combined_log_prob_fn(iter_), current_state, step_size, num_leapfrog_steps, seed, kernel_results.current_target_log_prob, kernel_results.current_grads_target_log_prob)) while_loop_kwargs = dict( cond=lambda iter_, *args: iter_ < num_steps, body=_loop_body, loop_vars=[ np.int32(0), # iter_ array_ops.zeros_like(current_log_prob), # ais_weights current_state, _make_dummy_kernel_results(current_state, current_log_prob, current_grads_log_prob), ]) if seed is not None: while_loop_kwargs["parallel_iterations"] = 1 [ais_weights, current_state, kernel_results] = control_flow_ops.while_loop( **while_loop_kwargs)[1:] # Lop-off "iter_". return [current_state, ais_weights, kernel_results] def kernel(target_log_prob_fn, current_state, step_size, num_leapfrog_steps, seed=None, current_target_log_prob=None, current_grads_target_log_prob=None, name=None): """Runs one iteration of Hamiltonian Monte Carlo. Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC) algorithm that takes a series of gradient-informed steps to produce a Metropolis proposal. This function applies one step of HMC to randomly update the variable `x`. This function can update multiple chains in parallel. It assumes that all leftmost dimensions of `current_state` index independent chain states (and are therefore updated independently). The output of `target_log_prob_fn()` should sum log-probabilities across all event dimensions. Slices along the rightmost dimensions may have different target distributions; for example, `current_state[0, :]` could have a different target distribution from `current_state[1, :]`. This is up to `target_log_prob_fn()`. (The number of independent chains is `tf.size(target_log_prob_fn(*current_state))`.) #### Examples: ##### Simple chain with warm-up. 
```python tfd = tf.contrib.distributions # Tuning acceptance rates: dtype = np.float32 target_accept_rate = 0.631 num_warmup_iter = 500 num_chain_iter = 500 x = tf.get_variable(name="x", initializer=dtype(1)) step_size = tf.get_variable(name="step_size", initializer=dtype(1)) target = tfd.Normal(loc=dtype(0), scale=dtype(1)) new_x, other_results = hmc.kernel( target_log_prob_fn=target.log_prob, current_state=x, step_size=step_size, num_leapfrog_steps=3)[:4] x_update = x.assign(new_x) step_size_update = step_size.assign_add( step_size * tf.where( other_results.acceptance_probs > target_accept_rate, 0.01, -0.01)) warmup = tf.group([x_update, step_size_update]) tf.global_variables_initializer().run() sess.graph.finalize() # No more graph building. # Warm up the sampler and adapt the step size for _ in xrange(num_warmup_iter): sess.run(warmup) # Collect samples without adapting step size samples = np.zeros([num_chain_iter]) for i in xrange(num_chain_iter): _, x_, target_log_prob_, grad_ = sess.run([ x_update, x, other_results.target_log_prob, other_results.grads_target_log_prob]) samples[i] = x_ print(samples.mean(), samples.std()) ``` ##### Sample from more complicated posterior. I.e., ```none W ~ MVN(loc=0, scale=sigma * eye(dims)) for i=1...num_samples: X[i] ~ MVN(loc=0, scale=eye(dims)) eps[i] ~ Normal(loc=0, scale=1) Y[i] = X[i].T * W + eps[i] ``` ```python tfd = tf.contrib.distributions def make_training_data(num_samples, dims, sigma): dt = np.asarray(sigma).dtype zeros = tf.zeros(dims, dtype=dt) x = tfd.MultivariateNormalDiag( loc=zeros).sample(num_samples, seed=1) w = tfd.MultivariateNormalDiag( loc=zeros, scale_identity_multiplier=sigma).sample(seed=2) noise = tfd.Normal( loc=dt(0), scale=dt(1)).sample(num_samples, seed=3) y = tf.tensordot(x, w, axes=[[1], [0]]) + noise return y, x, w def make_prior(sigma, dims): # p(w | sigma) return tfd.MultivariateNormalDiag( loc=tf.zeros([dims], dtype=sigma.dtype), scale_identity_multiplier=sigma) def make_likelihood(x, w): # p(y | x, w) return tfd.MultivariateNormalDiag( loc=tf.tensordot(x, w, axes=[[1], [0]])) # Setup assumptions. dtype = np.float32 num_samples = 150 dims = 10 num_iters = int(5e3) true_sigma = dtype(0.5) y, x, true_weights = make_training_data(num_samples, dims, true_sigma) # Estimate of `log(true_sigma)`. log_sigma = tf.get_variable(name="log_sigma", initializer=dtype(0)) sigma = tf.exp(log_sigma) # State of the Markov chain. weights = tf.get_variable( name="weights", initializer=np.random.randn(dims).astype(dtype)) prior = make_prior(sigma, dims) def joint_log_prob_fn(w): # f(w) = log p(w, y | x) return prior.log_prob(w) + make_likelihood(x, w).log_prob(y) weights_update = weights.assign( hmc.kernel(target_log_prob_fn=joint_log_prob, current_state=weights, step_size=0.1, num_leapfrog_steps=5)[0]) with tf.control_dependencies([weights_update]): loss = -prior.log_prob(weights) optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01) log_sigma_update = optimizer.minimize(loss, var_list=[log_sigma]) sess.graph.finalize() # No more graph building. tf.global_variables_initializer().run() sigma_history = np.zeros(num_iters, dtype) weights_history = np.zeros([num_iters, dims], dtype) for i in xrange(num_iters): _, sigma_, weights_, _ = sess.run([log_sigma_update, sigma, weights]) weights_history[i, :] = weights_ sigma_history[i] = sigma_ true_weights_ = sess.run(true_weights) # Should converge to something close to true_sigma. 
plt.plot(sigma_history); plt.ylabel("sigma"); plt.xlabel("iteration"); ``` Args: target_log_prob_fn: Python callable which takes an argument like `current_state` (or `*current_state` if it's a list) and returns its (possibly unnormalized) log-density under the target distribution. current_state: `Tensor` or Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). The first `r` dimensions index independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`. step_size: `Tensor` or Python `list` of `Tensor`s representing the step size for the leapfrog integrator. Must broadcast with the shape of `current_state`. Larger step sizes lead to faster progress, but too-large step sizes make rejection exponentially more likely. When possible, it's often helpful to match per-variable step sizes to the standard deviations of the target distribution in each variable. num_leapfrog_steps: Integer number of steps to run the leapfrog integrator for. Total progress per HMC step is roughly proportional to `step_size * num_leapfrog_steps`. seed: Python integer to seed the random number generator. current_target_log_prob: (Optional) `Tensor` representing the value of `target_log_prob_fn` at the `current_state`. The only reason to specify this argument is to reduce TF graph size. Default value: `None` (i.e., compute as needed). current_grads_target_log_prob: (Optional) Python list of `Tensor`s representing gradient of `current_target_log_prob` at the `current_state` and wrt the `current_state`. Must have same shape as `current_state`. The only reason to specify this argument is to reduce TF graph size. Default value: `None` (i.e., compute as needed). name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., "hmc_kernel"). Returns: accepted_state: Tensor or Python list of `Tensor`s representing the state(s) of the Markov chain(s) at each result step. Has same shape as `current_state`. kernel_results: `collections.namedtuple` of internal calculations used to advance the chain. Raises: ValueError: if there isn't one `step_size` or a list with same length as `current_state`. 
""" with ops.name_scope( name, "hmc_kernel", [current_state, step_size, num_leapfrog_steps, seed, current_target_log_prob, current_grads_target_log_prob]): with ops.name_scope("initialize"): [current_state_parts, step_sizes, current_target_log_prob, current_grads_target_log_prob] = _prepare_args( target_log_prob_fn, current_state, step_size, current_target_log_prob, current_grads_target_log_prob, maybe_expand=True) independent_chain_ndims = distributions_util.prefer_static_rank( current_target_log_prob) current_momentums = [] for s in current_state_parts: current_momentums.append(random_ops.random_normal( shape=array_ops.shape(s), dtype=s.dtype.base_dtype, seed=seed)) seed = distributions_util.gen_new_seed( seed, salt="hmc_kernel_momentums") num_leapfrog_steps = ops.convert_to_tensor( num_leapfrog_steps, dtype=dtypes.int32, name="num_leapfrog_steps") [ proposed_momentums, proposed_state_parts, proposed_target_log_prob, proposed_grads_target_log_prob, ] = _leapfrog_integrator(current_momentums, target_log_prob_fn, current_state_parts, step_sizes, num_leapfrog_steps, current_target_log_prob, current_grads_target_log_prob) energy_change = _compute_energy_change(current_target_log_prob, current_momentums, proposed_target_log_prob, proposed_momentums, independent_chain_ndims) # u < exp(min(-energy, 0)), where u~Uniform[0,1) # ==> -log(u) >= max(e, 0) # ==> -log(u) >= e # (Perhaps surprisingly, we don't have a better way to obtain a random # uniform from positive reals, i.e., `tf.random_uniform(minval=0, # maxval=np.inf)` won't work.) random_uniform = random_ops.random_uniform( shape=array_ops.shape(energy_change), dtype=energy_change.dtype, seed=seed) random_positive = -math_ops.log(random_uniform) is_accepted = random_positive >= energy_change accepted_target_log_prob = array_ops.where(is_accepted, proposed_target_log_prob, current_target_log_prob) accepted_state_parts = [_choose(is_accepted, proposed_state_part, current_state_part, independent_chain_ndims) for current_state_part, proposed_state_part in zip(current_state_parts, proposed_state_parts)] accepted_grads_target_log_prob = [ _choose(is_accepted, proposed_grad, grad, independent_chain_ndims) for proposed_grad, grad in zip(proposed_grads_target_log_prob, current_grads_target_log_prob)] maybe_flatten = lambda x: x if _is_list_like(current_state) else x[0] return [ maybe_flatten(accepted_state_parts), KernelResults( acceptance_probs=math_ops.exp(math_ops.minimum(-energy_change, 0.)), current_grads_target_log_prob=accepted_grads_target_log_prob, current_target_log_prob=accepted_target_log_prob, energy_change=energy_change, is_accepted=is_accepted, proposed_grads_target_log_prob=proposed_grads_target_log_prob, proposed_state=maybe_flatten(proposed_state_parts), proposed_target_log_prob=proposed_target_log_prob, random_positive=random_positive, ), ] def _leapfrog_integrator(current_momentums, target_log_prob_fn, current_state_parts, step_sizes, num_leapfrog_steps, current_target_log_prob=None, current_grads_target_log_prob=None, name=None): """Applies `num_leapfrog_steps` of the leapfrog integrator. Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`. #### Examples: ##### Simple quadratic potential. 
```python tfd = tf.contrib.distributions dims = 10 num_iter = int(1e3) dtype = np.float32 position = tf.placeholder(np.float32) momentum = tf.placeholder(np.float32) [ new_momentums, new_positions, ] = hmc._leapfrog_integrator( current_momentums=[momentum], target_log_prob_fn=tfd.MultivariateNormalDiag( loc=tf.zeros(dims, dtype)).log_prob, current_state_parts=[position], step_sizes=0.1, num_leapfrog_steps=3)[:2] sess.graph.finalize() # No more graph building. momentum_ = np.random.randn(dims).astype(dtype) position_ = np.random.randn(dims).astype(dtype) positions = np.zeros([num_iter, dims], dtype) for i in xrange(num_iter): position_, momentum_ = sess.run( [new_momentums[0], new_position[0]], feed_dict={position: position_, momentum: momentum_}) positions[i] = position_ plt.plot(positions[:, 0]); # Sinusoidal. ``` Args: current_momentums: Tensor containing the value(s) of the momentum variable(s) to update. target_log_prob_fn: Python callable which takes an argument like `*current_state_parts` and returns its (possibly unnormalized) log-density under the target distribution. current_state_parts: Python `list` of `Tensor`s representing the current state(s) of the Markov chain(s). The first `independent_chain_ndims` of the `Tensor`(s) index different chains. step_sizes: Python `list` of `Tensor`s representing the step size for the leapfrog integrator. Must broadcast with the shape of `current_state_parts`. Larger step sizes lead to faster progress, but too-large step sizes make rejection exponentially more likely. When possible, it's often helpful to match per-variable step sizes to the standard deviations of the target distribution in each variable. num_leapfrog_steps: Integer number of steps to run the leapfrog integrator for. Total progress per HMC step is roughly proportional to `step_size * num_leapfrog_steps`. current_target_log_prob: (Optional) `Tensor` representing the value of `target_log_prob_fn(*current_state_parts)`. The only reason to specify this argument is to reduce TF graph size. Default value: `None` (i.e., compute as needed). current_grads_target_log_prob: (Optional) Python list of `Tensor`s representing gradient of `target_log_prob_fn(*current_state_parts`) wrt `current_state_parts`. Must have same shape as `current_state_parts`. The only reason to specify this argument is to reduce TF graph size. Default value: `None` (i.e., compute as needed). name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., "hmc_leapfrog_integrator"). Returns: proposed_momentums: Updated value of the momentum. proposed_state_parts: Tensor or Python list of `Tensor`s representing the state(s) of the Markov chain(s) at each result step. Has same shape as input `current_state_parts`. proposed_target_log_prob: `Tensor` representing the value of `target_log_prob_fn` at `accepted_state`. proposed_grads_target_log_prob: Gradient of `proposed_target_log_prob` wrt `accepted_state`. Raises: ValueError: if `len(momentums) != len(state_parts)`. ValueError: if `len(state_parts) != len(step_sizes)`. ValueError: if `len(state_parts) != len(grads_target_log_prob)`. TypeError: if `not target_log_prob.dtype.is_floating`. 
""" def _loop_body(step, current_momentums, current_state_parts, ignore_current_target_log_prob, # pylint: disable=unused-argument current_grads_target_log_prob): return [step + 1] + list(_leapfrog_step(current_momentums, target_log_prob_fn, current_state_parts, step_sizes, current_grads_target_log_prob)) with ops.name_scope( name, "hmc_leapfrog_integrator", [current_momentums, current_state_parts, step_sizes, num_leapfrog_steps, current_target_log_prob, current_grads_target_log_prob]): if len(current_momentums) != len(current_state_parts): raise ValueError("`momentums` must be in one-to-one correspondence " "with `state_parts`") num_leapfrog_steps = ops.convert_to_tensor(num_leapfrog_steps, name="num_leapfrog_steps") current_target_log_prob, current_grads_target_log_prob = ( _maybe_call_fn_and_grads( target_log_prob_fn, current_state_parts, current_target_log_prob, current_grads_target_log_prob)) return control_flow_ops.while_loop( cond=lambda iter_, *args: iter_ < num_leapfrog_steps, body=_loop_body, loop_vars=[ np.int32(0), # iter_ current_momentums, current_state_parts, current_target_log_prob, current_grads_target_log_prob, ], back_prop=False)[1:] # Lop-off "iter_". def _leapfrog_step(current_momentums, target_log_prob_fn, current_state_parts, step_sizes, current_grads_target_log_prob, name=None): """Applies one step of the leapfrog integrator.""" with ops.name_scope( name, "_leapfrog_step", [current_momentums, current_state_parts, step_sizes, current_grads_target_log_prob]): proposed_momentums = [m + 0.5 * ss * g for m, ss, g in zip(current_momentums, step_sizes, current_grads_target_log_prob)] proposed_state_parts = [x + ss * m for x, ss, m in zip(current_state_parts, step_sizes, proposed_momentums)] proposed_target_log_prob = target_log_prob_fn(*proposed_state_parts) if not proposed_target_log_prob.dtype.is_floating: raise TypeError("`target_log_prob_fn` must produce a `Tensor` " "with `float` `dtype`.") proposed_grads_target_log_prob = gradients_ops.gradients( proposed_target_log_prob, proposed_state_parts) if any(g is None for g in proposed_grads_target_log_prob): raise ValueError( "Encountered `None` gradient. Does your target `target_log_prob_fn` " "access all `tf.Variable`s via `tf.get_variable`?\n" " current_state_parts: {}\n" " proposed_state_parts: {}\n" " proposed_grads_target_log_prob: {}".format( current_state_parts, proposed_state_parts, proposed_grads_target_log_prob)) proposed_momentums = [m + 0.5 * ss * g for m, ss, g in zip(proposed_momentums, step_sizes, proposed_grads_target_log_prob)] return [ proposed_momentums, proposed_state_parts, proposed_target_log_prob, proposed_grads_target_log_prob, ] def _compute_energy_change(current_target_log_prob, current_momentums, proposed_target_log_prob, proposed_momentums, independent_chain_ndims, name=None): """Helper to `kernel` which computes the energy change.""" with ops.name_scope( name, "compute_energy_change", ([current_target_log_prob, proposed_target_log_prob, independent_chain_ndims] + current_momentums + proposed_momentums)): # Abbreviate lk0=log_kinetic_energy and lk1=proposed_log_kinetic_energy # since they're a mouthful and lets us inline more. lk0, lk1 = [], [] for current_momentum, proposed_momentum in zip(current_momentums, proposed_momentums): axis = math_ops.range(independent_chain_ndims, array_ops.rank(current_momentum)) lk0.append(_log_sum_sq(current_momentum, axis)) lk1.append(_log_sum_sq(proposed_momentum, axis)) lk0 = -np.log(2.) 
+ math_ops.reduce_logsumexp(array_ops.stack(lk0, axis=-1), axis=-1) lk1 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk1, axis=-1), axis=-1) lp0 = -current_target_log_prob # log_potential lp1 = -proposed_target_log_prob # proposed_log_potential x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)], axis=-1) # The sum is NaN if any element is NaN or we see both +Inf and -Inf. # Thus we will replace such rows with infinite energy change which implies # rejection. Recall that float-comparisons with NaN are always False. is_sum_determinate = ( math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) & math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1)) is_sum_determinate = array_ops.tile( is_sum_determinate[..., array_ops.newaxis], multiples=array_ops.concat([ array_ops.ones(array_ops.rank(is_sum_determinate), dtype=dtypes.int32), [4], ], axis=0)) x = array_ops.where(is_sum_determinate, x, array_ops.fill(array_ops.shape(x), value=x.dtype.as_numpy_dtype(np.inf))) return math_ops.reduce_sum(x, axis=-1) def _choose(is_accepted, accepted, rejected, independent_chain_ndims, name=None): """Helper to `kernel` which expand_dims `is_accepted` to apply tf.where.""" def _expand_is_accepted_like(x): with ops.name_scope("_choose"): expand_shape = array_ops.concat([ array_ops.shape(is_accepted), array_ops.ones([array_ops.rank(x) - array_ops.rank(is_accepted)], dtype=dtypes.int32), ], axis=0) multiples = array_ops.concat([ array_ops.ones([array_ops.rank(is_accepted)], dtype=dtypes.int32), array_ops.shape(x)[independent_chain_ndims:], ], axis=0) m = array_ops.tile(array_ops.reshape(is_accepted, expand_shape), multiples) m.set_shape(x.shape) return m with ops.name_scope(name, "_choose", values=[ is_accepted, accepted, rejected, independent_chain_ndims]): return array_ops.where(_expand_is_accepted_like(accepted), accepted, rejected) def _maybe_call_fn_and_grads(fn, fn_arg_list, fn_result=None, grads_fn_result=None, description="target_log_prob"): """Helper which computes `fn_result` and `grads` if needed.""" fn_arg_list = (list(fn_arg_list) if _is_list_like(fn_arg_list) else [fn_arg_list]) if fn_result is None: fn_result = fn(*fn_arg_list) if not fn_result.dtype.is_floating: raise TypeError("`{}` must be a `Tensor` with `float` `dtype`.".format( description)) if grads_fn_result is None: grads_fn_result = gradients_ops.gradients( fn_result, fn_arg_list) if len(fn_arg_list) != len(grads_fn_result): raise ValueError("`{}` must be in one-to-one correspondence with " "`grads_{}`".format(*[description]*2)) if any(g is None for g in grads_fn_result): raise ValueError("Encountered `None` gradient.") return fn_result, grads_fn_result def _prepare_args(target_log_prob_fn, state, step_size, target_log_prob=None, grads_target_log_prob=None, maybe_expand=False, description="target_log_prob"): """Helper which processes input args to meet list-like assumptions.""" state_parts = list(state) if _is_list_like(state) else [state] state_parts = [ops.convert_to_tensor(s, name="state") for s in state_parts] target_log_prob, grads_target_log_prob = _maybe_call_fn_and_grads( target_log_prob_fn, state_parts, target_log_prob, grads_target_log_prob, description) step_sizes = list(step_size) if _is_list_like(step_size) else [step_size] step_sizes = [ ops.convert_to_tensor( s, name="step_size", dtype=target_log_prob.dtype) for s in step_sizes] if len(step_sizes) == 1: step_sizes *= len(state_parts) if len(state_parts) != len(step_sizes): raise ValueError("There should be exactly one 
`step_size` or it should " "have same length as `current_state`.") maybe_flatten = lambda x: x if maybe_expand or _is_list_like(state) else x[0] return [ maybe_flatten(state_parts), maybe_flatten(step_sizes), target_log_prob, grads_target_log_prob, ] def _is_list_like(x): """Helper which returns `True` if input is `list`-like.""" return isinstance(x, (tuple, list)) def _log_sum_sq(x, axis=None): """Computes log(sum(x**2)).""" return math_ops.reduce_logsumexp(2. * math_ops.log(math_ops.abs(x)), axis)
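The _leapfrog_step routine in the file above applies the standard Hamiltonian Monte Carlo update: a half step on the momenta using the gradient of the target log-density, a full step on the state, then a second half step with the refreshed gradient. Below is a minimal standalone NumPy sketch of that same update; the standard-normal target, step size, and starting values are illustrative assumptions, not values taken from the archived file.

import numpy as np

def target_log_prob(x):
    # log-density of a standard normal, up to an additive constant
    return -0.5 * np.sum(x ** 2)

def grad_target_log_prob(x):
    # gradient of the log-density above
    return -x

def leapfrog_step(momentum, state, step_size, grad):
    # half step on momentum, full step on position, second half step on
    # momentum with the refreshed gradient -- same order as _leapfrog_step
    momentum = momentum + 0.5 * step_size * grad
    state = state + step_size * momentum
    grad = grad_target_log_prob(state)
    momentum = momentum + 0.5 * step_size * grad
    return momentum, state, target_log_prob(state), grad

state = np.array([1.0, -2.0])
momentum = np.array([0.3, 0.1])
print(leapfrog_step(momentum, state, 0.1, grad_target_log_prob(state)))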
38.838111
83
0.666319
from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import functional_ops from tensorflow.python.ops import gradients_impl as gradients_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops.distributions import util as distributions_util __all__ = [ "sample_chain", "sample_annealed_importance_chain", "kernel", ] KernelResults = collections.namedtuple( "KernelResults", [ "acceptance_probs", "current_grads_target_log_prob", "current_target_log_prob", "energy_change", "is_accepted", "proposed_grads_target_log_prob", "proposed_state", "proposed_target_log_prob", "random_positive", ]) def _make_dummy_kernel_results( dummy_state, dummy_target_log_prob, dummy_grads_target_log_prob): return KernelResults( acceptance_probs=dummy_target_log_prob, current_grads_target_log_prob=dummy_grads_target_log_prob, current_target_log_prob=dummy_target_log_prob, energy_change=dummy_target_log_prob, is_accepted=array_ops.ones_like(dummy_target_log_prob, dtypes.bool), proposed_grads_target_log_prob=dummy_grads_target_log_prob, proposed_state=dummy_state, proposed_target_log_prob=dummy_target_log_prob, random_positive=dummy_target_log_prob, ) def sample_chain( num_results, target_log_prob_fn, current_state, step_size, num_leapfrog_steps, num_burnin_steps=0, num_steps_between_results=0, seed=None, current_target_log_prob=None, current_grads_target_log_prob=None, name=None): with ops.name_scope( name, "hmc_sample_chain", [num_results, current_state, step_size, num_leapfrog_steps, num_burnin_steps, num_steps_between_results, seed, current_target_log_prob, current_grads_target_log_prob]): with ops.name_scope("initialize"): [ current_state, step_size, current_target_log_prob, current_grads_target_log_prob, ] = _prepare_args( target_log_prob_fn, current_state, step_size, current_target_log_prob, current_grads_target_log_prob) num_results = ops.convert_to_tensor( num_results, dtype=dtypes.int32, name="num_results") num_leapfrog_steps = ops.convert_to_tensor( num_leapfrog_steps, dtype=dtypes.int32, name="num_leapfrog_steps") num_burnin_steps = ops.convert_to_tensor( num_burnin_steps, dtype=dtypes.int32, name="num_burnin_steps") num_steps_between_results = ops.convert_to_tensor( num_steps_between_results, dtype=dtypes.int32, name="num_steps_between_results") def _run_chain(num_steps, current_state, kernel_results): def _loop_body(iter_, current_state, kernel_results): return [iter_ + 1] + list(kernel( target_log_prob_fn, current_state, step_size, num_leapfrog_steps, seed, kernel_results.current_target_log_prob, kernel_results.current_grads_target_log_prob)) while_loop_kwargs = dict( cond=lambda iter_, *args: iter_ < num_steps, body=_loop_body, loop_vars=[ np.int32(0), current_state, kernel_results, ], ) if seed is not None: while_loop_kwargs["parallel_iterations"] = 1 return control_flow_ops.while_loop( **while_loop_kwargs)[1:] def _scan_body(args_list, iter_): current_state, kernel_results = args_list return _run_chain( 1 + array_ops.where(math_ops.equal(iter_, 0), num_burnin_steps, num_steps_between_results), current_state, kernel_results) scan_kwargs = dict( fn=_scan_body, elems=math_ops.range(num_results), initializer=[ current_state, 
_make_dummy_kernel_results( current_state, current_target_log_prob, current_grads_target_log_prob), ]) if seed is not None: scan_kwargs["parallel_iterations"] = 1 return functional_ops.scan(**scan_kwargs) def sample_annealed_importance_chain( proposal_log_prob_fn, num_steps, target_log_prob_fn, current_state, step_size, num_leapfrog_steps, seed=None, name=None): def make_convex_combined_log_prob_fn(iter_): def _fn(*args): p = proposal_log_prob_fn(*args) t = target_log_prob_fn(*args) dtype = p.dtype.base_dtype beta = (math_ops.cast(iter_ + 1, dtype) / math_ops.cast(num_steps, dtype)) return (1. - beta) * p + beta * t return _fn with ops.name_scope( name, "hmc_sample_annealed_importance_chain", [num_steps, current_state, step_size, num_leapfrog_steps, seed]): with ops.name_scope("initialize"): [ current_state, step_size, current_log_prob, current_grads_log_prob, ] = _prepare_args( make_convex_combined_log_prob_fn(iter_=0), current_state, step_size, description="convex_combined_log_prob") num_steps = ops.convert_to_tensor( num_steps, dtype=dtypes.int32, name="num_steps") num_leapfrog_steps = ops.convert_to_tensor( num_leapfrog_steps, dtype=dtypes.int32, name="num_leapfrog_steps") def _loop_body(iter_, ais_weights, current_state, kernel_results): current_state_parts = (list(current_state) if _is_list_like(current_state) else [current_state]) ais_weights += ((target_log_prob_fn(*current_state_parts) - proposal_log_prob_fn(*current_state_parts)) / math_ops.cast(num_steps, ais_weights.dtype)) return [iter_ + 1, ais_weights] + list(kernel( make_convex_combined_log_prob_fn(iter_), current_state, step_size, num_leapfrog_steps, seed, kernel_results.current_target_log_prob, kernel_results.current_grads_target_log_prob)) while_loop_kwargs = dict( cond=lambda iter_, *args: iter_ < num_steps, body=_loop_body, loop_vars=[ np.int32(0), array_ops.zeros_like(current_log_prob), current_state, _make_dummy_kernel_results(current_state, current_log_prob, current_grads_log_prob), ]) if seed is not None: while_loop_kwargs["parallel_iterations"] = 1 [ais_weights, current_state, kernel_results] = control_flow_ops.while_loop( **while_loop_kwargs)[1:] return [current_state, ais_weights, kernel_results] def kernel(target_log_prob_fn, current_state, step_size, num_leapfrog_steps, seed=None, current_target_log_prob=None, current_grads_target_log_prob=None, name=None): with ops.name_scope( name, "hmc_kernel", [current_state, step_size, num_leapfrog_steps, seed, current_target_log_prob, current_grads_target_log_prob]): with ops.name_scope("initialize"): [current_state_parts, step_sizes, current_target_log_prob, current_grads_target_log_prob] = _prepare_args( target_log_prob_fn, current_state, step_size, current_target_log_prob, current_grads_target_log_prob, maybe_expand=True) independent_chain_ndims = distributions_util.prefer_static_rank( current_target_log_prob) current_momentums = [] for s in current_state_parts: current_momentums.append(random_ops.random_normal( shape=array_ops.shape(s), dtype=s.dtype.base_dtype, seed=seed)) seed = distributions_util.gen_new_seed( seed, salt="hmc_kernel_momentums") num_leapfrog_steps = ops.convert_to_tensor( num_leapfrog_steps, dtype=dtypes.int32, name="num_leapfrog_steps") [ proposed_momentums, proposed_state_parts, proposed_target_log_prob, proposed_grads_target_log_prob, ] = _leapfrog_integrator(current_momentums, target_log_prob_fn, current_state_parts, step_sizes, num_leapfrog_steps, current_target_log_prob, current_grads_target_log_prob) energy_change = 
_compute_energy_change(current_target_log_prob, current_momentums, proposed_target_log_prob, proposed_momentums, independent_chain_ndims) # uniform from positive reals, i.e., `tf.random_uniform(minval=0, # maxval=np.inf)` won't work.) random_uniform = random_ops.random_uniform( shape=array_ops.shape(energy_change), dtype=energy_change.dtype, seed=seed) random_positive = -math_ops.log(random_uniform) is_accepted = random_positive >= energy_change accepted_target_log_prob = array_ops.where(is_accepted, proposed_target_log_prob, current_target_log_prob) accepted_state_parts = [_choose(is_accepted, proposed_state_part, current_state_part, independent_chain_ndims) for current_state_part, proposed_state_part in zip(current_state_parts, proposed_state_parts)] accepted_grads_target_log_prob = [ _choose(is_accepted, proposed_grad, grad, independent_chain_ndims) for proposed_grad, grad in zip(proposed_grads_target_log_prob, current_grads_target_log_prob)] maybe_flatten = lambda x: x if _is_list_like(current_state) else x[0] return [ maybe_flatten(accepted_state_parts), KernelResults( acceptance_probs=math_ops.exp(math_ops.minimum(-energy_change, 0.)), current_grads_target_log_prob=accepted_grads_target_log_prob, current_target_log_prob=accepted_target_log_prob, energy_change=energy_change, is_accepted=is_accepted, proposed_grads_target_log_prob=proposed_grads_target_log_prob, proposed_state=maybe_flatten(proposed_state_parts), proposed_target_log_prob=proposed_target_log_prob, random_positive=random_positive, ), ] def _leapfrog_integrator(current_momentums, target_log_prob_fn, current_state_parts, step_sizes, num_leapfrog_steps, current_target_log_prob=None, current_grads_target_log_prob=None, name=None): def _loop_body(step, current_momentums, current_state_parts, ignore_current_target_log_prob, current_grads_target_log_prob): return [step + 1] + list(_leapfrog_step(current_momentums, target_log_prob_fn, current_state_parts, step_sizes, current_grads_target_log_prob)) with ops.name_scope( name, "hmc_leapfrog_integrator", [current_momentums, current_state_parts, step_sizes, num_leapfrog_steps, current_target_log_prob, current_grads_target_log_prob]): if len(current_momentums) != len(current_state_parts): raise ValueError("`momentums` must be in one-to-one correspondence " "with `state_parts`") num_leapfrog_steps = ops.convert_to_tensor(num_leapfrog_steps, name="num_leapfrog_steps") current_target_log_prob, current_grads_target_log_prob = ( _maybe_call_fn_and_grads( target_log_prob_fn, current_state_parts, current_target_log_prob, current_grads_target_log_prob)) return control_flow_ops.while_loop( cond=lambda iter_, *args: iter_ < num_leapfrog_steps, body=_loop_body, loop_vars=[ np.int32(0), current_momentums, current_state_parts, current_target_log_prob, current_grads_target_log_prob, ], back_prop=False)[1:] def _leapfrog_step(current_momentums, target_log_prob_fn, current_state_parts, step_sizes, current_grads_target_log_prob, name=None): with ops.name_scope( name, "_leapfrog_step", [current_momentums, current_state_parts, step_sizes, current_grads_target_log_prob]): proposed_momentums = [m + 0.5 * ss * g for m, ss, g in zip(current_momentums, step_sizes, current_grads_target_log_prob)] proposed_state_parts = [x + ss * m for x, ss, m in zip(current_state_parts, step_sizes, proposed_momentums)] proposed_target_log_prob = target_log_prob_fn(*proposed_state_parts) if not proposed_target_log_prob.dtype.is_floating: raise TypeError("`target_log_prob_fn` must produce a `Tensor` " "with `float` `dtype`.") 
proposed_grads_target_log_prob = gradients_ops.gradients( proposed_target_log_prob, proposed_state_parts) if any(g is None for g in proposed_grads_target_log_prob): raise ValueError( "Encountered `None` gradient. Does your target `target_log_prob_fn` " "access all `tf.Variable`s via `tf.get_variable`?\n" " current_state_parts: {}\n" " proposed_state_parts: {}\n" " proposed_grads_target_log_prob: {}".format( current_state_parts, proposed_state_parts, proposed_grads_target_log_prob)) proposed_momentums = [m + 0.5 * ss * g for m, ss, g in zip(proposed_momentums, step_sizes, proposed_grads_target_log_prob)] return [ proposed_momentums, proposed_state_parts, proposed_target_log_prob, proposed_grads_target_log_prob, ] def _compute_energy_change(current_target_log_prob, current_momentums, proposed_target_log_prob, proposed_momentums, independent_chain_ndims, name=None): with ops.name_scope( name, "compute_energy_change", ([current_target_log_prob, proposed_target_log_prob, independent_chain_ndims] + current_momentums + proposed_momentums)): lk0, lk1 = [], [] for current_momentum, proposed_momentum in zip(current_momentums, proposed_momentums): axis = math_ops.range(independent_chain_ndims, array_ops.rank(current_momentum)) lk0.append(_log_sum_sq(current_momentum, axis)) lk1.append(_log_sum_sq(proposed_momentum, axis)) lk0 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk0, axis=-1), axis=-1) lk1 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk1, axis=-1), axis=-1) lp0 = -current_target_log_prob # log_potential lp1 = -proposed_target_log_prob # proposed_log_potential x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)], axis=-1) # The sum is NaN if any element is NaN or we see both +Inf and -Inf. # Thus we will replace such rows with infinite energy change which implies # rejection. Recall that float-comparisons with NaN are always False. 
is_sum_determinate = ( math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) & math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1)) is_sum_determinate = array_ops.tile( is_sum_determinate[..., array_ops.newaxis], multiples=array_ops.concat([ array_ops.ones(array_ops.rank(is_sum_determinate), dtype=dtypes.int32), [4], ], axis=0)) x = array_ops.where(is_sum_determinate, x, array_ops.fill(array_ops.shape(x), value=x.dtype.as_numpy_dtype(np.inf))) return math_ops.reduce_sum(x, axis=-1) def _choose(is_accepted, accepted, rejected, independent_chain_ndims, name=None): def _expand_is_accepted_like(x): with ops.name_scope("_choose"): expand_shape = array_ops.concat([ array_ops.shape(is_accepted), array_ops.ones([array_ops.rank(x) - array_ops.rank(is_accepted)], dtype=dtypes.int32), ], axis=0) multiples = array_ops.concat([ array_ops.ones([array_ops.rank(is_accepted)], dtype=dtypes.int32), array_ops.shape(x)[independent_chain_ndims:], ], axis=0) m = array_ops.tile(array_ops.reshape(is_accepted, expand_shape), multiples) m.set_shape(x.shape) return m with ops.name_scope(name, "_choose", values=[ is_accepted, accepted, rejected, independent_chain_ndims]): return array_ops.where(_expand_is_accepted_like(accepted), accepted, rejected) def _maybe_call_fn_and_grads(fn, fn_arg_list, fn_result=None, grads_fn_result=None, description="target_log_prob"): fn_arg_list = (list(fn_arg_list) if _is_list_like(fn_arg_list) else [fn_arg_list]) if fn_result is None: fn_result = fn(*fn_arg_list) if not fn_result.dtype.is_floating: raise TypeError("`{}` must be a `Tensor` with `float` `dtype`.".format( description)) if grads_fn_result is None: grads_fn_result = gradients_ops.gradients( fn_result, fn_arg_list) if len(fn_arg_list) != len(grads_fn_result): raise ValueError("`{}` must be in one-to-one correspondence with " "`grads_{}`".format(*[description]*2)) if any(g is None for g in grads_fn_result): raise ValueError("Encountered `None` gradient.") return fn_result, grads_fn_result def _prepare_args(target_log_prob_fn, state, step_size, target_log_prob=None, grads_target_log_prob=None, maybe_expand=False, description="target_log_prob"): state_parts = list(state) if _is_list_like(state) else [state] state_parts = [ops.convert_to_tensor(s, name="state") for s in state_parts] target_log_prob, grads_target_log_prob = _maybe_call_fn_and_grads( target_log_prob_fn, state_parts, target_log_prob, grads_target_log_prob, description) step_sizes = list(step_size) if _is_list_like(step_size) else [step_size] step_sizes = [ ops.convert_to_tensor( s, name="step_size", dtype=target_log_prob.dtype) for s in step_sizes] if len(step_sizes) == 1: step_sizes *= len(state_parts) if len(state_parts) != len(step_sizes): raise ValueError("There should be exactly one `step_size` or it should " "have same length as `current_state`.") maybe_flatten = lambda x: x if maybe_expand or _is_list_like(state) else x[0] return [ maybe_flatten(state_parts), maybe_flatten(step_sizes), target_log_prob, grads_target_log_prob, ] def _is_list_like(x): return isinstance(x, (tuple, list)) def _log_sum_sq(x, axis=None): return math_ops.reduce_logsumexp(2. * math_ops.log(math_ops.abs(x)), axis)
true
true
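In kernel() in the record above, a proposal is accepted when -log(U) >= energy_change with U ~ Uniform(0, 1), and the reported acceptance probability is exp(min(-energy_change, 0)). The short NumPy sketch below reproduces just that accept/reject rule; the per-chain energy-change values are made-up numbers for illustration.

import numpy as np

rng = np.random.default_rng(0)
energy_change = np.array([-0.3, 0.5, 2.0])       # hypothetical per-chain values
random_positive = -np.log(rng.uniform(size=energy_change.shape))
is_accepted = random_positive >= energy_change   # Metropolis accept/reject
acceptance_probs = np.exp(np.minimum(-energy_change, 0.0))
print(is_accepted)
print(acceptance_probs)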
f724923d68de6140d8f4a588c2dc56847fda4dbf
1,235
py
Python
k8s/redis_component.py
wesfloyd/aave-ui-caching-server
714149d66b8718d0610dc84faff2854930f05f3f
[ "BSD-3-Clause" ]
null
null
null
k8s/redis_component.py
wesfloyd/aave-ui-caching-server
714149d66b8718d0610dc84faff2854930f05f3f
[ "BSD-3-Clause" ]
null
null
null
k8s/redis_component.py
wesfloyd/aave-ui-caching-server
714149d66b8718d0610dc84faff2854930f05f3f
[ "BSD-3-Clause" ]
null
null
null
from kdsl.apps.v1 import Deployment, DeploymentSpec from kdsl.core.v1 import Service, ServiceSpec, PodSpec, ObjectMeta, ContainerItem import values name = "redis" labels = dict(component=name) annotations = values.shared_annotations metadata = ObjectMeta( name=name, namespace=values.NAMESPACE, labels=dict(**labels, **values.shared_labels, **values.datadog_labels(name)), annotations=values.shared_annotations ) service = Service( metadata=metadata, spec=ServiceSpec( selector=labels, ports={ 6379: dict(name="redis"), }, ), ) pod_spec = PodSpec( containers=dict( redis=ContainerItem( image="redis:6-alpine", imagePullPolicy="Always", ports={ 6379: dict(name="redis", protocol="TCP"), }, ), ), ) deployment = Deployment( metadata=metadata, spec=DeploymentSpec( replicas=1, selector=dict(matchLabels=labels), template=dict( metadata=ObjectMeta( labels=dict(**metadata.labels), annotations=annotations ), spec=pod_spec, ), ), ) entries = [service, deployment]
20.583333
81
0.597571
from kdsl.apps.v1 import Deployment, DeploymentSpec from kdsl.core.v1 import Service, ServiceSpec, PodSpec, ObjectMeta, ContainerItem import values name = "redis" labels = dict(component=name) annotations = values.shared_annotations metadata = ObjectMeta( name=name, namespace=values.NAMESPACE, labels=dict(**labels, **values.shared_labels, **values.datadog_labels(name)), annotations=values.shared_annotations ) service = Service( metadata=metadata, spec=ServiceSpec( selector=labels, ports={ 6379: dict(name="redis"), }, ), ) pod_spec = PodSpec( containers=dict( redis=ContainerItem( image="redis:6-alpine", imagePullPolicy="Always", ports={ 6379: dict(name="redis", protocol="TCP"), }, ), ), ) deployment = Deployment( metadata=metadata, spec=DeploymentSpec( replicas=1, selector=dict(matchLabels=labels), template=dict( metadata=ObjectMeta( labels=dict(**metadata.labels), annotations=annotations ), spec=pod_spec, ), ), ) entries = [service, deployment]
true
true
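The kdsl objects in the record above describe a Service and a single-replica Deployment exposing Redis on port 6379. As a rough point of comparison, the sketch below builds the corresponding core/v1 Service as a plain dictionary and prints it as YAML; it assumes PyYAML is available and omits the namespace, shared labels, and annotations pulled in from values.py.

import yaml  # PyYAML, assumed available

service = {
    "apiVersion": "v1",
    "kind": "Service",
    "metadata": {
        "name": "redis",
        "labels": {"component": "redis"},
    },
    "spec": {
        "selector": {"component": "redis"},
        "ports": [{"name": "redis", "port": 6379}],
    },
}
print(yaml.safe_dump(service, sort_keys=False))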
f72492ac2f5be9bca11a91d75997f67a02504d8f
56,520
py
Python
letsencrypt-apache/letsencrypt_apache/configurator.py
sinesiobittencourt/letsencrypt
0c704fa7f27277c838e13deed85e5c1ecbe46e90
[ "Apache-2.0" ]
1
2015-12-26T22:50:32.000Z
2015-12-26T22:50:32.000Z
letsencrypt-apache/letsencrypt_apache/configurator.py
sinesiobittencourt/letsencrypt
0c704fa7f27277c838e13deed85e5c1ecbe46e90
[ "Apache-2.0" ]
null
null
null
letsencrypt-apache/letsencrypt_apache/configurator.py
sinesiobittencourt/letsencrypt
0c704fa7f27277c838e13deed85e5c1ecbe46e90
[ "Apache-2.0" ]
null
null
null
"""Apache Configuration based off of Augeas Configurator.""" # pylint: disable=too-many-lines import filecmp import logging import os import re import shutil import socket import time import zope.interface from acme import challenges from letsencrypt import errors from letsencrypt import interfaces from letsencrypt import le_util from letsencrypt.plugins import common from letsencrypt_apache import augeas_configurator from letsencrypt_apache import constants from letsencrypt_apache import display_ops from letsencrypt_apache import tls_sni_01 from letsencrypt_apache import obj from letsencrypt_apache import parser from collections import defaultdict logger = logging.getLogger(__name__) # TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing # tags need to be the same case, otherwise Augeas doesn't recognize them. # This is not able to be completely remedied by regular expressions because # Augeas views <VirtualHost> </Virtualhost> as an error. This will just # require another check_parsing_errors() after all files are included... # (after a find_directive search is executed currently). It can be a one # time check however because all of LE's transactions will ensure # only properly formed sections are added. # Note: This protocol works for filenames with spaces in it, the sites are # properly set up and directives are changed appropriately, but Apache won't # recognize names in sites-enabled that have spaces. These are not added to the # Apache configuration. It may be wise to warn the user if they are trying # to use vhost filenames that contain spaces and offer to change ' ' to '_' # Note: FILEPATHS and changes to files are transactional. They are copied # over before the updates are made to the existing files. NEW_FILES is # transactional due to the use of register_file_creation() # TODO: Verify permissions on configuration root... it is easier than # checking permissions on each of the relative directories and less error # prone. # TODO: Write a server protocol finder. Listen <port> <protocol> or # Protocol <protocol>. This can verify partial setups are correct # TODO: Add directives to sites-enabled... not sites-available. # sites-available doesn't allow immediate find_dir search even with save() # and load() class ApacheConfigurator(augeas_configurator.AugeasConfigurator): # pylint: disable=too-many-instance-attributes,too-many-public-methods """Apache configurator. State of Configurator: This code has been been tested and built for Ubuntu 14.04 Apache 2.4 and it works for Ubuntu 12.04 Apache 2.2 :ivar config: Configuration. 
:type config: :class:`~letsencrypt.interfaces.IConfig` :ivar parser: Handles low level parsing :type parser: :class:`~letsencrypt_apache.parser` :ivar tup version: version of Apache :ivar list vhosts: All vhosts found in the configuration (:class:`list` of :class:`~letsencrypt_apache.obj.VirtualHost`) :ivar dict assoc: Mapping between domains and vhosts """ zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller) zope.interface.classProvides(interfaces.IPluginFactory) description = "Apache Web Server - Alpha" @classmethod def add_parser_arguments(cls, add): add("ctl", default=constants.CLI_DEFAULTS["ctl"], help="Path to the 'apache2ctl' binary, used for 'configtest', " "retrieving the Apache2 version number, and initialization " "parameters.") add("enmod", default=constants.CLI_DEFAULTS["enmod"], help="Path to the Apache 'a2enmod' binary.") add("dismod", default=constants.CLI_DEFAULTS["dismod"], help="Path to the Apache 'a2dismod' binary.") add("le-vhost-ext", default=constants.CLI_DEFAULTS["le_vhost_ext"], help="SSL vhost configuration extension.") add("server-root", default=constants.CLI_DEFAULTS["server_root"], help="Apache server root directory.") le_util.add_deprecated_argument(add, "init-script", 1) def __init__(self, *args, **kwargs): """Initialize an Apache Configurator. :param tup version: version of Apache as a tuple (2, 4, 7) (used mostly for unittesting) """ version = kwargs.pop("version", None) super(ApacheConfigurator, self).__init__(*args, **kwargs) # Add name_server association dict self.assoc = dict() # Outstanding challenges self._chall_out = set() # These will be set in the prepare function self.parser = None self.version = version self.vhosts = None self._enhance_func = {"redirect": self._enable_redirect, "ensure-http-header": self._set_http_header} @property def mod_ssl_conf(self): """Full absolute path to SSL configuration file.""" return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST) def prepare(self): """Prepare the authenticator/installer. :raises .errors.NoInstallationError: If Apache configs cannot be found :raises .errors.MisconfigurationError: If Apache is misconfigured :raises .errors.NotSupportedError: If Apache version is not supported :raises .errors.PluginError: If there is any other error """ # Verify Apache is installed for exe in (self.conf("ctl"), self.conf("enmod"), self.conf("dismod")): if not le_util.exe_exists(exe): raise errors.NoInstallationError # Make sure configuration is valid self.config_test() self.parser = parser.ApacheParser( self.aug, self.conf("server-root"), self.conf("ctl")) # Check for errors in parsing files with Augeas self.check_parsing_errors("httpd.aug") # Set Version if self.version is None: self.version = self.get_version() if self.version < (2, 2): raise errors.NotSupportedError( "Apache Version %s not supported.", str(self.version)) # Get all of the available vhosts self.vhosts = self.get_virtual_hosts() install_ssl_options_conf(self.mod_ssl_conf) def deploy_cert(self, domain, cert_path, key_path, chain_path=None, fullchain_path=None): # pylint: disable=unused-argument """Deploys certificate to specified virtual host. Currently tries to find the last directives to deploy the cert in the VHost associated with the given domain. If it can't find the directives, it searches the "included" confs. The function verifies that it has located the three directives and finally modifies them to point to the correct destination. 
After the certificate is installed, the VirtualHost is enabled if it isn't already. .. todo:: Might be nice to remove chain directive if none exists This shouldn't happen within letsencrypt though :raises errors.PluginError: When unable to deploy certificate due to a lack of directives """ vhost = self.choose_vhost(domain) self._clean_vhost(vhost) # This is done first so that ssl module is enabled and cert_path, # cert_key... can all be parsed appropriately self.prepare_server_https("443") path = {"cert_path": self.parser.find_dir("SSLCertificateFile", None, vhost.path), "cert_key": self.parser.find_dir("SSLCertificateKeyFile", None, vhost.path)} # Only include if a certificate chain is specified if chain_path is not None: path["chain_path"] = self.parser.find_dir( "SSLCertificateChainFile", None, vhost.path) if not path["cert_path"] or not path["cert_key"]: # Throw some can't find all of the directives error" logger.warn( "Cannot find a cert or key directive in %s. " "VirtualHost was not modified", vhost.path) # Presumably break here so that the virtualhost is not modified raise errors.PluginError( "Unable to find cert and/or key directives") logger.info("Deploying Certificate to VirtualHost %s", vhost.filep) logger.debug("Apache version is %s", ".".join(str(i) for i in self.version)) if self.version < (2, 4, 8) or (chain_path and not fullchain_path): # install SSLCertificateFile, SSLCertificateKeyFile, # and SSLCertificateChainFile directives set_cert_path = cert_path self.aug.set(path["cert_path"][-1], cert_path) self.aug.set(path["cert_key"][-1], key_path) if chain_path is not None: self.parser.add_dir(vhost.path, "SSLCertificateChainFile", chain_path) else: raise errors.PluginError("--chain-path is required for your version of Apache") else: if not fullchain_path: raise errors.PluginError("Please provide the --fullchain-path\ option pointing to your full chain file") set_cert_path = fullchain_path self.aug.set(path["cert_path"][-1], fullchain_path) self.aug.set(path["cert_key"][-1], key_path) # Save notes about the transaction that took place self.save_notes += ("Changed vhost at %s with addresses of %s\n" "\tSSLCertificateFile %s\n" "\tSSLCertificateKeyFile %s\n" % (vhost.filep, ", ".join(str(addr) for addr in vhost.addrs), set_cert_path, key_path)) if chain_path is not None: self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path # Make sure vhost is enabled if not vhost.enabled: self.enable_site(vhost) def choose_vhost(self, target_name, temp=False): """Chooses a virtual host based on the given domain name. If there is no clear virtual host to be selected, the user is prompted with all available choices. The returned vhost is guaranteed to have TLS enabled unless temp is True. If temp is True, there is no such guarantee and the result is not cached. 
:param str target_name: domain name :param bool temp: whether the vhost is only used temporarily :returns: ssl vhost associated with name :rtype: :class:`~letsencrypt_apache.obj.VirtualHost` :raises .errors.PluginError: If no vhost is available or chosen """ # Allows for domain names to be associated with a virtual host if target_name in self.assoc: return self.assoc[target_name] # Try to find a reasonable vhost vhost = self._find_best_vhost(target_name) if vhost is not None: if temp: return vhost if not vhost.ssl: vhost = self.make_vhost_ssl(vhost) self.assoc[target_name] = vhost return vhost return self._choose_vhost_from_list(target_name, temp) def _choose_vhost_from_list(self, target_name, temp=False): # Select a vhost from a list vhost = display_ops.select_vhost(target_name, self.vhosts) if vhost is None: logger.error( "No vhost exists with servername or alias of: %s. " "No vhost was selected. Please specify servernames " "in the Apache config", target_name) raise errors.PluginError("No vhost selected") elif temp: return vhost elif not vhost.ssl: addrs = self._get_proposed_addrs(vhost, "443") # TODO: Conflicts is too conservative if not any(vhost.enabled and vhost.conflicts(addrs) for vhost in self.vhosts): vhost = self.make_vhost_ssl(vhost) else: logger.error( "The selected vhost would conflict with other HTTPS " "VirtualHosts within Apache. Please select another " "vhost or add ServerNames to your configuration.") raise errors.PluginError( "VirtualHost not able to be selected.") self.assoc[target_name] = vhost return vhost def _find_best_vhost(self, target_name): """Finds the best vhost for a target_name. This does not upgrade a vhost to HTTPS... it only finds the most appropriate vhost for the given target_name. :returns: VHost or None """ # Points 4 - Servername SSL # Points 3 - Address name with SSL # Points 2 - Servername no SSL # Points 1 - Address name with no SSL best_candidate = None best_points = 0 for vhost in self.vhosts: if vhost.modmacro is True: continue if target_name in vhost.get_names(): points = 2 elif any(addr.get_addr() == target_name for addr in vhost.addrs): points = 1 else: # No points given if names can't be found. # This gets hit but doesn't register continue # pragma: no cover if vhost.ssl: points += 2 if points > best_points: best_points = points best_candidate = vhost # No winners here... is there only one reasonable vhost? if best_candidate is None: # reasonable == Not all _default_ addrs vhosts = self._non_default_vhosts() # remove mod_macro hosts from reasonable vhosts reasonable_vhosts = [vh for vh in vhosts if vh.modmacro is False] if len(reasonable_vhosts) == 1: best_candidate = reasonable_vhosts[0] return best_candidate def _non_default_vhosts(self): """Return all non _default_ only vhosts.""" return [vh for vh in self.vhosts if not all( addr.get_addr() == "_default_" for addr in vh.addrs )] def get_all_names(self): """Returns all names found in the Apache Configuration. 
:returns: All ServerNames, ServerAliases, and reverse DNS entries for virtual host addresses :rtype: set """ all_names = set() vhost_macro = [] for vhost in self.vhosts: all_names.update(vhost.get_names()) if vhost.modmacro: vhost_macro.append(vhost.filep) for addr in vhost.addrs: if common.hostname_regex.match(addr.get_addr()): all_names.add(addr.get_addr()) else: name = self.get_name_from_ip(addr) if name: all_names.add(name) if len(vhost_macro) > 0: zope.component.getUtility(interfaces.IDisplay).notification( "Apache mod_macro seems to be in use in file(s):\n{0}" "\n\nUnfortunately mod_macro is not yet supported".format( "\n ".join(vhost_macro))) return all_names def get_name_from_ip(self, addr): # pylint: disable=no-self-use """Returns a reverse dns name if available. :param addr: IP Address :type addr: ~.common.Addr :returns: name or empty string if name cannot be determined :rtype: str """ # If it isn't a private IP, do a reverse DNS lookup if not common.private_ips_regex.match(addr.get_addr()): try: socket.inet_aton(addr.get_addr()) return socket.gethostbyaddr(addr.get_addr())[0] except (socket.error, socket.herror, socket.timeout): pass return "" def _add_servernames(self, host): """Helper function for get_virtual_hosts(). :param host: In progress vhost whose names will be added :type host: :class:`~letsencrypt_apache.obj.VirtualHost` """ # Take the final ServerName as each overrides the previous servername_match = self.parser.find_dir( "ServerName", None, start=host.path, exclude=False) serveralias_match = self.parser.find_dir( "ServerAlias", None, start=host.path, exclude=False) for alias in serveralias_match: serveralias = self.parser.get_arg(alias) if not host.modmacro: host.aliases.add(serveralias) if servername_match: # Get last ServerName as each overwrites the previous servername = self.parser.get_arg(servername_match[-1]) if not host.modmacro: host.name = servername def _create_vhost(self, path): """Used by get_virtual_hosts to create vhost objects :param str path: Augeas path to virtual host :returns: newly created vhost :rtype: :class:`~letsencrypt_apache.obj.VirtualHost` """ addrs = set() args = self.aug.match(path + "/arg") for arg in args: addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg))) is_ssl = False if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False): is_ssl = True # "SSLEngine on" might be set outside of <VirtualHost> # Treat vhosts with port 443 as ssl vhosts for addr in addrs: if addr.get_port() == "443": is_ssl = True filename = get_file_path(path) is_enabled = self.is_site_enabled(filename) macro = False if "/macro/" in path.lower(): macro = True vhost = obj.VirtualHost(filename, path, addrs, is_ssl, is_enabled, modmacro=macro) self._add_servernames(vhost) return vhost # TODO: make "sites-available" a configurable directory def get_virtual_hosts(self): """Returns list of virtual hosts found in the Apache configuration. :returns: List of :class:`~letsencrypt_apache.obj.VirtualHost` objects found in configuration :rtype: list """ # Search sites-available, httpd.conf for possible virtual hosts paths = self.aug.match( ("/files%s/sites-available//*[label()=~regexp('%s')]" % (self.parser.root, parser.case_i("VirtualHost")))) vhs = [] for path in paths: vhs.append(self._create_vhost(path)) return vhs def is_name_vhost(self, target_addr): """Returns if vhost is a name based vhost NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are now NameVirtualHosts. 
If version is earlier than 2.4, check if addr has a NameVirtualHost directive in the Apache config :param letsencrypt_apache.obj.Addr target_addr: vhost address :returns: Success :rtype: bool """ # Mixed and matched wildcard NameVirtualHost with VirtualHost # behavior is undefined. Make sure that an exact match exists # search for NameVirtualHost directive for ip_addr # note ip_addr can be FQDN although Apache does not recommend it return (self.version >= (2, 4) or self.parser.find_dir("NameVirtualHost", str(target_addr))) def add_name_vhost(self, addr): """Adds NameVirtualHost directive for given address. :param addr: Address that will be added as NameVirtualHost directive :type addr: :class:`~letsencrypt_apache.obj.Addr` """ loc = parser.get_aug_path(self.parser.loc["name"]) if addr.get_port() == "443": path = self.parser.add_dir_to_ifmodssl( loc, "NameVirtualHost", [str(addr)]) else: path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)]) msg = ("Setting %s to be NameBasedVirtualHost\n" "\tDirective added to %s\n" % (addr, path)) logger.debug(msg) self.save_notes += msg def prepare_server_https(self, port, temp=False): """Prepare the server for HTTPS. Make sure that the ssl_module is loaded and that the server is appropriately listening on port. :param str port: Port to listen on """ if "ssl_module" not in self.parser.modules: self.enable_mod("ssl", temp=temp) # Check for Listen <port> # Note: This could be made to also look for ip:443 combo listens = [self.parser.get_arg(x).split()[0] for x in self.parser.find_dir("Listen")] # In case no Listens are set (which really is a broken apache config) if not listens: listens = ["80"] for listen in listens: # For any listen statement, check if the machine also listens on Port 443. # If not, add such a listen statement. if len(listen.split(":")) == 1: # Its listening to all interfaces if port not in listens: if port == "443": args = [port] else: # Non-standard ports should specify https protocol args = [port, "https"] self.parser.add_dir_to_ifmodssl( parser.get_aug_path( self.parser.loc["listen"]), "Listen", args) self.save_notes += "Added Listen %s directive to %s\n" % ( port, self.parser.loc["listen"]) listens.append(port) else: # The Listen statement specifies an ip _, ip = listen[::-1].split(":", 1) ip = ip[::-1] if "%s:%s" % (ip, port) not in listens: if port == "443": args = ["%s:%s" % (ip, port)] else: # Non-standard ports should specify https protocol args = ["%s:%s" % (ip, port), "https"] self.parser.add_dir_to_ifmodssl( parser.get_aug_path( self.parser.loc["listen"]), "Listen", args) self.save_notes += "Added Listen %s:%s directive to %s\n" % ( ip, port, self.parser.loc["listen"]) listens.append("%s:%s" % (ip, port)) def make_addrs_sni_ready(self, addrs): """Checks to see if the server is ready for SNI challenges. :param addrs: Addresses to check SNI compatibility :type addrs: :class:`~letsencrypt_apache.obj.Addr` """ # Version 2.4 and later are automatically SNI ready. if self.version >= (2, 4): return for addr in addrs: if not self.is_name_vhost(addr): logger.debug("Setting VirtualHost at %s to be a name " "based virtual host", addr) self.add_name_vhost(addr) def make_vhost_ssl(self, nonssl_vhost): # pylint: disable=too-many-locals """Makes an ssl_vhost version of a nonssl_vhost. Duplicates vhost and adds default ssl options New vhost will reside as (nonssl_vhost.path) + ``letsencrypt_apache.constants.CLI_DEFAULTS["le_vhost_ext"]`` .. 
note:: This function saves the configuration :param nonssl_vhost: Valid VH that doesn't have SSLEngine on :type nonssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :returns: SSL vhost :rtype: :class:`~letsencrypt_apache.obj.VirtualHost` :raises .errors.PluginError: If more than one virtual host is in the file or if plugin is unable to write/read vhost files. """ avail_fp = nonssl_vhost.filep ssl_fp = self._get_ssl_vhost_path(avail_fp) self._copy_create_ssl_vhost_skeleton(avail_fp, ssl_fp) # Reload augeas to take into account the new vhost self.aug.load() # Get Vhost augeas path for new vhost vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" % (ssl_fp, parser.case_i("VirtualHost"))) if len(vh_p) != 1: logger.error("Error: should only be one vhost in %s", avail_fp) raise errors.PluginError("Currently, we only support " "configurations with one vhost per file") else: # This simplifies the process vh_p = vh_p[0] # Update Addresses self._update_ssl_vhosts_addrs(vh_p) # Add directives self._add_dummy_ssl_directives(vh_p) # Log actions and create save notes logger.info("Created an SSL vhost at %s", ssl_fp) self.save_notes += "Created ssl vhost at %s\n" % ssl_fp self.save() # We know the length is one because of the assertion above # Create the Vhost object ssl_vhost = self._create_vhost(vh_p) self.vhosts.append(ssl_vhost) # NOTE: Searches through Augeas seem to ruin changes to directives # The configuration must also be saved before being searched # for the new directives; For these reasons... this is tacked # on after fully creating the new vhost # Now check if addresses need to be added as NameBasedVhost addrs # This is for compliance with versions of Apache < 2.4 self._add_name_vhost_if_necessary(ssl_vhost) return ssl_vhost def _get_ssl_vhost_path(self, non_ssl_vh_fp): # Get filepath of new ssl_vhost if non_ssl_vh_fp.endswith(".conf"): return non_ssl_vh_fp[:-(len(".conf"))] + self.conf("le_vhost_ext") else: return non_ssl_vh_fp + self.conf("le_vhost_ext") def _copy_create_ssl_vhost_skeleton(self, avail_fp, ssl_fp): """Copies over existing Vhost with IfModule mod_ssl.c> skeleton. :param str avail_fp: Pointer to the original available non-ssl vhost :param str ssl_fp: Full path where the new ssl_vhost will reside. A new file is created on the filesystem. 
""" # First register the creation so that it is properly removed if # configuration is rolled back self.reverter.register_file_creation(False, ssl_fp) try: with open(avail_fp, "r") as orig_file: with open(ssl_fp, "w") as new_file: new_file.write("<IfModule mod_ssl.c>\n") for line in orig_file: new_file.write(line) new_file.write("</IfModule>\n") except IOError: logger.fatal("Error writing/reading to file in make_vhost_ssl") raise errors.PluginError("Unable to write/read in make_vhost_ssl") def _update_ssl_vhosts_addrs(self, vh_path): ssl_addrs = set() ssl_addr_p = self.aug.match(vh_path + "/arg") for addr in ssl_addr_p: old_addr = obj.Addr.fromstring( str(self.parser.get_arg(addr))) ssl_addr = old_addr.get_addr_obj("443") self.aug.set(addr, str(ssl_addr)) ssl_addrs.add(ssl_addr) return ssl_addrs def _clean_vhost(self, vhost): # remove duplicated or conflicting ssl directives self._deduplicate_directives(vhost.path, ["SSLCertificateFile", "SSLCertificateKeyFile"]) # remove all problematic directives self._remove_directives(vhost.path, ["SSLCertificateChainFile"]) def _deduplicate_directives(self, vh_path, directives): for directive in directives: while len(self.parser.find_dir(directive, None, vh_path, False)) > 1: directive_path = self.parser.find_dir(directive, None, vh_path, False) self.aug.remove(re.sub(r"/\w*$", "", directive_path[0])) def _remove_directives(self, vh_path, directives): for directive in directives: while len(self.parser.find_dir(directive, None, vh_path, False)) > 0: directive_path = self.parser.find_dir(directive, None, vh_path, False) self.aug.remove(re.sub(r"/\w*$", "", directive_path[0])) def _add_dummy_ssl_directives(self, vh_path): self.parser.add_dir(vh_path, "SSLCertificateFile", "insert_cert_file_path") self.parser.add_dir(vh_path, "SSLCertificateKeyFile", "insert_key_file_path") self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf) def _add_name_vhost_if_necessary(self, vhost): """Add NameVirtualHost Directives if necessary for new vhost. NameVirtualHosts was a directive in Apache < 2.4 https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost :param vhost: New virtual host that was recently created. :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost` """ need_to_save = False # See if the exact address appears in any other vhost # Remember 1.1.1.1:* == 1.1.1.1 -> hence any() for addr in vhost.addrs: for test_vh in self.vhosts: if (vhost.filep != test_vh.filep and any(test_addr == addr for test_addr in test_vh.addrs) and not self.is_name_vhost(addr)): self.add_name_vhost(addr) logger.info("Enabling NameVirtualHosts on %s", addr) need_to_save = True if need_to_save: self.save() ############################################################################ # Enhancements ############################################################################ def supported_enhancements(self): # pylint: disable=no-self-use """Returns currently supported enhancements.""" return ["redirect", "ensure-http-header"] def enhance(self, domain, enhancement, options=None): """Enhance configuration. :param str domain: domain to enhance :param str enhancement: enhancement type defined in :const:`~letsencrypt.constants.ENHANCEMENTS` :param options: options for the enhancement See :const:`~letsencrypt.constants.ENHANCEMENTS` documentation for appropriate parameter. :raises .errors.PluginError: If Enhancement is not supported, or if there is any other problem with the enhancement. 
""" try: func = self._enhance_func[enhancement] except KeyError: raise errors.PluginError( "Unsupported enhancement: {0}".format(enhancement)) try: func(self.choose_vhost(domain), options) except errors.PluginError: logger.warn("Failed %s for %s", enhancement, domain) raise def _set_http_header(self, ssl_vhost, header_substring): """Enables header that is identified by header_substring on ssl_vhost. If the header identified by header_substring is not already set, a new Header directive is placed in ssl_vhost's configuration with arguments from: constants.HTTP_HEADER[header_substring] .. note:: This function saves the configuration :param ssl_vhost: Destination of traffic, an ssl enabled vhost :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :param header_substring: string that uniquely identifies a header. e.g: Strict-Transport-Security, Upgrade-Insecure-Requests. :type str :returns: Success, general_vhost (HTTP vhost) :rtype: (bool, :class:`~letsencrypt_apache.obj.VirtualHost`) :raises .errors.PluginError: If no viable HTTP host can be created or set with header header_substring. """ if "headers_module" not in self.parser.modules: self.enable_mod("headers") # Check if selected header is already set self._verify_no_matching_http_header(ssl_vhost, header_substring) # Add directives to server self.parser.add_dir(ssl_vhost.path, "Header", constants.HEADER_ARGS[header_substring]) self.save_notes += ("Adding %s header to ssl vhost in %s\n" % (header_substring, ssl_vhost.filep)) self.save() logger.info("Adding %s header to ssl vhost in %s", header_substring, ssl_vhost.filep) def _verify_no_matching_http_header(self, ssl_vhost, header_substring): """Checks to see if an there is an existing Header directive that contains the string header_substring. :param ssl_vhost: vhost to check :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :param header_substring: string that uniquely identifies a header. e.g: Strict-Transport-Security, Upgrade-Insecure-Requests. :type str :returns: boolean :rtype: (bool) :raises errors.PluginEnhancementAlreadyPresent When header header_substring exists """ header_path = self.parser.find_dir("Header", None, start=ssl_vhost.path) if header_path: # "Existing Header directive for virtualhost" pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower()) for match in header_path: if re.search(pat, self.aug.get(match).lower()): raise errors.PluginEnhancementAlreadyPresent( "Existing %s header" % (header_substring)) def _enable_redirect(self, ssl_vhost, unused_options): """Redirect all equivalent HTTP traffic to ssl_vhost. .. todo:: This enhancement should be rewritten and will unfortunately require lots of debugging by hand. Adds Redirect directive to the port 80 equivalent of ssl_vhost First the function attempts to find the vhost with equivalent ip addresses that serves on non-ssl ports The function then adds the directive .. note:: This function saves the configuration :param ssl_vhost: Destination of traffic, an ssl enabled vhost :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :param unused_options: Not currently used :type unused_options: Not Available :returns: Success, general_vhost (HTTP vhost) :rtype: (bool, :class:`~letsencrypt_apache.obj.VirtualHost`) :raises .errors.PluginError: If no viable HTTP host can be created or used for the redirect. 
""" if "rewrite_module" not in self.parser.modules: self.enable_mod("rewrite") general_vh = self._get_http_vhost(ssl_vhost) if general_vh is None: # Add virtual_server with redirect logger.debug("Did not find http version of ssl virtual host " "attempting to create") redirect_addrs = self._get_proposed_addrs(ssl_vhost) for vhost in self.vhosts: if vhost.enabled and vhost.conflicts(redirect_addrs): raise errors.PluginError( "Unable to find corresponding HTTP vhost; " "Unable to create one as intended addresses conflict; " "Current configuration does not support automated " "redirection") self._create_redirect_vhost(ssl_vhost) else: # Check if LetsEncrypt redirection already exists self._verify_no_letsencrypt_redirect(general_vh) # Note: if code flow gets here it means we didn't find the exact # letsencrypt RewriteRule config for redirection. Finding # another RewriteRule is likely to be fine in most or all cases, # but redirect loops are possible in very obscure cases; see #1620 # for reasoning. if self._is_rewrite_exists(general_vh): logger.warn("Added an HTTP->HTTPS rewrite in addition to " "other RewriteRules; you may wish to check for " "overall consistency.") # Add directives to server # Note: These are not immediately searchable in sites-enabled # even with save() and load() if not self._is_rewrite_engine_on(general_vh): self.parser.add_dir(general_vh.path, "RewriteEngine", "on") if self.get_version() >= (2, 3, 9): self.parser.add_dir(general_vh.path, "RewriteRule", constants.REWRITE_HTTPS_ARGS_WITH_END) else: self.parser.add_dir(general_vh.path, "RewriteRule", constants.REWRITE_HTTPS_ARGS) self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" % (general_vh.filep, ssl_vhost.filep)) self.save() logger.info("Redirecting vhost in %s to ssl vhost in %s", general_vh.filep, ssl_vhost.filep) def _verify_no_letsencrypt_redirect(self, vhost): """Checks to see if a redirect was already installed by letsencrypt. Checks to see if virtualhost already contains a rewrite rule that is identical to Letsencrypt's redirection rewrite rule. :param vhost: vhost to check :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :raises errors.PluginEnhancementAlreadyPresent: When the exact letsencrypt redirection WriteRule exists in virtual host. """ rewrite_path = self.parser.find_dir( "RewriteRule", None, start=vhost.path) # There can be other RewriteRule directive lines in vhost config. # rewrite_args_dict keys are directive ids and the corresponding value # for each is a list of arguments to that directive. rewrite_args_dict = defaultdict(list) pat = r'.*(directive\[\d+\]).*' for match in rewrite_path: m = re.match(pat, match) if m: dir_id = m.group(1) rewrite_args_dict[dir_id].append(match) if rewrite_args_dict: redirect_args = [constants.REWRITE_HTTPS_ARGS, constants.REWRITE_HTTPS_ARGS_WITH_END] for matches in rewrite_args_dict.values(): if [self.aug.get(x) for x in matches] in redirect_args: raise errors.PluginEnhancementAlreadyPresent( "Let's Encrypt has already enabled redirection") def _is_rewrite_exists(self, vhost): """Checks if there exists a RewriteRule directive in vhost :param vhost: vhost to check :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :returns: True if a RewriteRule directive exists. 
:rtype: bool """ rewrite_path = self.parser.find_dir( "RewriteRule", None, start=vhost.path) return bool(rewrite_path) def _is_rewrite_engine_on(self, vhost): """Checks if a RewriteEngine directive is on :param vhost: vhost to check :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost` """ rewrite_engine_path = self.parser.find_dir("RewriteEngine", "on", start=vhost.path) if rewrite_engine_path: return self.parser.get_arg(rewrite_engine_path[0]) return False def _create_redirect_vhost(self, ssl_vhost): """Creates an http_vhost specifically to redirect for the ssl_vhost. :param ssl_vhost: ssl vhost :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :returns: tuple of the form (`success`, :class:`~letsencrypt_apache.obj.VirtualHost`) :rtype: tuple """ text = self._get_redirect_config_str(ssl_vhost) redirect_filepath = self._write_out_redirect(ssl_vhost, text) self.aug.load() # Make a new vhost data structure and add it to the lists new_vhost = self._create_vhost(parser.get_aug_path(redirect_filepath)) self.vhosts.append(new_vhost) # Finally create documentation for the change self.save_notes += ("Created a port 80 vhost, %s, for redirection to " "ssl vhost %s\n" % (new_vhost.filep, ssl_vhost.filep)) def _get_redirect_config_str(self, ssl_vhost): # get servernames and serveraliases serveralias = "" servername = "" if ssl_vhost.name is not None: servername = "ServerName " + ssl_vhost.name if ssl_vhost.aliases: serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases) rewrite_rule_args = [] if self.get_version() >= (2, 3, 9): rewrite_rule_args = constants.REWRITE_HTTPS_ARGS_WITH_END else: rewrite_rule_args = constants.REWRITE_HTTPS_ARGS return ("<VirtualHost %s>\n" "%s \n" "%s \n" "ServerSignature Off\n" "\n" "RewriteEngine On\n" "RewriteRule %s\n" "\n" "ErrorLog /var/log/apache2/redirect.error.log\n" "LogLevel warn\n" "</VirtualHost>\n" % (" ".join(str(addr) for addr in self._get_proposed_addrs(ssl_vhost)), servername, serveralias, " ".join(rewrite_rule_args))) def _write_out_redirect(self, ssl_vhost, text): # This is the default name redirect_filename = "le-redirect.conf" # See if a more appropriate name can be applied if ssl_vhost.name is not None: # make sure servername doesn't exceed filename length restriction if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)): redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name redirect_filepath = os.path.join( self.parser.root, "sites-available", redirect_filename) # Register the new file that will be created # Note: always register the creation before writing to ensure file will # be removed in case of unexpected program exit self.reverter.register_file_creation(False, redirect_filepath) # Write out file with open(redirect_filepath, "w") as redirect_file: redirect_file.write(text) logger.info("Created redirect file: %s", redirect_filename) return redirect_filepath def _get_http_vhost(self, ssl_vhost): """Find appropriate HTTP vhost for ssl_vhost.""" # First candidate vhosts filter candidate_http_vhs = [ vhost for vhost in self.vhosts if not vhost.ssl ] # Second filter - check addresses for http_vh in candidate_http_vhs: if http_vh.same_server(ssl_vhost): return http_vh return None def _get_proposed_addrs(self, vhost, port="80"): # pylint: disable=no-self-use """Return all addrs of vhost with the port replaced with the specified. 
:param obj.VirtualHost ssl_vhost: Original Vhost :param str port: Desired port for new addresses :returns: `set` of :class:`~obj.Addr` """ redirects = set() for addr in vhost.addrs: redirects.add(addr.get_addr_obj(port)) return redirects def get_all_certs_keys(self): """Find all existing keys, certs from configuration. Retrieve all certs and keys set in VirtualHosts on the Apache server :returns: list of tuples with form [(cert, key, path)] cert - str path to certificate file key - str path to associated key file path - File path to configuration file. :rtype: list """ c_k = set() for vhost in self.vhosts: if vhost.ssl: cert_path = self.parser.find_dir( "SSLCertificateFile", None, start=vhost.path, exclude=False) key_path = self.parser.find_dir( "SSLCertificateKeyFile", None, start=vhost.path, exclude=False) if cert_path and key_path: cert = os.path.abspath(self.parser.get_arg(cert_path[-1])) key = os.path.abspath(self.parser.get_arg(key_path[-1])) c_k.add((cert, key, get_file_path(cert_path[-1]))) else: logger.warning( "Invalid VirtualHost configuration - %s", vhost.filep) return c_k def is_site_enabled(self, avail_fp): """Checks to see if the given site is enabled. .. todo:: fix hardcoded sites-enabled, check os.path.samefile :param str avail_fp: Complete file path of available site :returns: Success :rtype: bool """ enabled_dir = os.path.join(self.parser.root, "sites-enabled") for entry in os.listdir(enabled_dir): try: if filecmp.cmp(avail_fp, os.path.join(enabled_dir, entry)): return True except OSError: pass return False def enable_site(self, vhost): """Enables an available site, Apache reload required. .. note:: Does not make sure that the site correctly works or that all modules are enabled appropriately. .. todo:: This function should number subdomains before the domain vhost .. todo:: Make sure link is not broken... :param vhost: vhost to enable :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost` :raises .errors.NotSupportedError: If filesystem layout is not supported. """ if self.is_site_enabled(vhost.filep): return if "/sites-available/" in vhost.filep: enabled_path = ("%s/sites-enabled/%s" % (self.parser.root, os.path.basename(vhost.filep))) self.reverter.register_file_creation(False, enabled_path) os.symlink(vhost.filep, enabled_path) vhost.enabled = True logger.info("Enabling available site: %s", vhost.filep) self.save_notes += "Enabled site %s\n" % vhost.filep else: raise errors.NotSupportedError( "Unsupported filesystem layout. " "sites-available/enabled expected.") def enable_mod(self, mod_name, temp=False): """Enables module in Apache. Both enables and reloads Apache so module is active. :param str mod_name: Name of the module to enable. (e.g. 'ssl') :param bool temp: Whether or not this is a temporary action. :raises .errors.NotSupportedError: If the filesystem layout is not supported. :raises .errors.MisconfigurationError: If a2enmod or a2dismod cannot be run. """ # Support Debian specific setup avail_path = os.path.join(self.parser.root, "mods-available") enabled_path = os.path.join(self.parser.root, "mods-enabled") if not os.path.isdir(avail_path) or not os.path.isdir(enabled_path): raise errors.NotSupportedError( "Unsupported directory layout. You may try to enable mod %s " "and try again." 
% mod_name) deps = _get_mod_deps(mod_name) # Enable all dependencies for dep in deps: if (dep + "_module") not in self.parser.modules: self._enable_mod_debian(dep, temp) self._add_parser_mod(dep) note = "Enabled dependency of %s module - %s" % (mod_name, dep) if not temp: self.save_notes += note + os.linesep logger.debug(note) # Enable actual module self._enable_mod_debian(mod_name, temp) self._add_parser_mod(mod_name) if not temp: self.save_notes += "Enabled %s module in Apache\n" % mod_name logger.info("Enabled Apache %s module", mod_name) # Modules can enable additional config files. Variables may be defined # within these new configuration sections. # Reload is not necessary as DUMP_RUN_CFG uses latest config. self.parser.update_runtime_variables(self.conf("ctl")) def _add_parser_mod(self, mod_name): """Shortcut for updating parser modules.""" self.parser.modules.add(mod_name + "_module") self.parser.modules.add("mod_" + mod_name + ".c") def _enable_mod_debian(self, mod_name, temp): """Assumes mods-available, mods-enabled layout.""" # Generate reversal command. # Try to be safe here... check that we can probably reverse before # applying enmod command if not le_util.exe_exists(self.conf("dismod")): raise errors.MisconfigurationError( "Unable to find a2dismod, please make sure a2enmod and " "a2dismod are configured correctly for letsencrypt.") self.reverter.register_undo_command( temp, [self.conf("dismod"), mod_name]) le_util.run_script([self.conf("enmod"), mod_name]) def restart(self): """Runs a config test and reloads the Apache server. :raises .errors.MisconfigurationError: If either the config test or reload fails. """ self.config_test() self._reload() def _reload(self): """Reloads the Apache server. :raises .errors.MisconfigurationError: If reload fails """ try: le_util.run_script([self.conf("ctl"), "-k", "graceful"]) except errors.SubprocessError as err: raise errors.MisconfigurationError(str(err)) def config_test(self): # pylint: disable=no-self-use """Check the configuration of Apache for errors. :raises .errors.MisconfigurationError: If config_test fails """ try: le_util.run_script([self.conf("ctl"), "configtest"]) except errors.SubprocessError as err: raise errors.MisconfigurationError(str(err)) def get_version(self): """Return version of Apache Server. Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7)) :returns: version :rtype: tuple :raises .PluginError: if unable to find Apache version """ try: stdout, _ = le_util.run_script([self.conf("ctl"), "-v"]) except errors.SubprocessError: raise errors.PluginError( "Unable to run %s -v" % self.conf("ctl")) regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE) matches = regex.findall(stdout) if len(matches) != 1: raise errors.PluginError("Unable to find Apache version") return tuple([int(i) for i in matches[0].split(".")]) def more_info(self): """Human-readable string to help understand the module""" return ( "Configures Apache to authenticate and install HTTPS.{0}" "Server root: {root}{0}" "Version: {version}".format( os.linesep, root=self.parser.loc["root"], version=".".join(str(i) for i in self.version)) ) ########################################################################### # Challenges Section ########################################################################### def get_chall_pref(self, unused_domain): # pylint: disable=no-self-use """Return list of challenge preferences.""" return [challenges.TLSSNI01] def perform(self, achalls): """Perform the configuration related challenge. 
This function currently assumes all challenges will be fulfilled. If this turns out not to be the case in the future. Cleanup and outstanding challenges will have to be designed better. """ self._chall_out.update(achalls) responses = [None] * len(achalls) chall_doer = tls_sni_01.ApacheTlsSni01(self) for i, achall in enumerate(achalls): # Currently also have chall_doer hold associated index of the # challenge. This helps to put all of the responses back together # when they are all complete. chall_doer.add_chall(achall, i) sni_response = chall_doer.perform() if sni_response: # Must reload in order to activate the challenges. # Handled here because we may be able to load up other challenge # types self.restart() # TODO: Remove this dirty hack. We need to determine a reliable way # of identifying when the new configuration is being used. time.sleep(3) # Go through all of the challenges and assign them to the proper # place in the responses return value. All responses must be in the # same order as the original challenges. for i, resp in enumerate(sni_response): responses[chall_doer.indices[i]] = resp return responses def cleanup(self, achalls): """Revert all challenges.""" self._chall_out.difference_update(achalls) # If all of the challenges have been finished, clean up everything if not self._chall_out: self.revert_challenge_config() self.restart() self.parser.init_modules() def _get_mod_deps(mod_name): """Get known module dependencies. .. note:: This does not need to be accurate in order for the client to run. This simply keeps things clean if the user decides to revert changes. .. warning:: If all deps are not included, it may cause incorrect parsing behavior, due to enable_mod's shortcut for updating the parser's currently defined modules (`.ApacheConfigurator._add_parser_mod`) This would only present a major problem in extremely atypical configs that use ifmod for the missing deps. """ deps = { "ssl": ["setenvif", "mime", "socache_shmcb"] } return deps.get(mod_name, []) def get_file_path(vhost_path): """Get file path from augeas_vhost_path. Takes in Augeas path and returns the file name :param str vhost_path: Augeas virtual host path :returns: filename of vhost :rtype: str """ # Strip off /files avail_fp = vhost_path[6:] # This can be optimized... while True: # Cast all to lowercase to be case insensitive find_if = avail_fp.lower().find("/ifmodule") if find_if != -1: avail_fp = avail_fp[:find_if] continue find_vh = avail_fp.lower().find("/virtualhost") if find_vh != -1: avail_fp = avail_fp[:find_vh] continue find_macro = avail_fp.lower().find("/macro") if find_macro != -1: avail_fp = avail_fp[:find_macro] continue break return avail_fp def install_ssl_options_conf(options_ssl): """ Copy Let's Encrypt's SSL options file into the system's config dir if required. """ # XXX if we ever try to enforce a local privilege boundary (eg, running # letsencrypt for unprivileged users via setuid), this function will need # to be modified. # XXX if the user is in security-autoupdate mode, we should be willing to # overwrite the options_ssl file at least if it's unmodified: # https://github.com/letsencrypt/letsencrypt/issues/1123 # Check to make sure options-ssl.conf is installed if not os.path.isfile(options_ssl): shutil.copyfile(constants.MOD_SSL_CONF_SRC, options_ssl)
38.845361
95
0.608935
import filecmp import logging import os import re import shutil import socket import time import zope.interface from acme import challenges from letsencrypt import errors from letsencrypt import interfaces from letsencrypt import le_util from letsencrypt.plugins import common from letsencrypt_apache import augeas_configurator from letsencrypt_apache import constants from letsencrypt_apache import display_ops from letsencrypt_apache import tls_sni_01 from letsencrypt_apache import obj from letsencrypt_apache import parser from collections import defaultdict logger = logging.getLogger(__name__) # This is not able to be completely remedied by regular expressions because # Augeas views <VirtualHost> </Virtualhost> as an error. This will just # require another check_parsing_errors() after all files are included... # (after a find_directive search is executed currently). It can be a one # time check however because all of LE's transactions will ensure # recognize names in sites-enabled that have spaces. These are not added to the # Apache configuration. It may be wise to warn the user if they are trying # to use vhost filenames that contain spaces and offer to change ' ' to '_' # Note: FILEPATHS and changes to files are transactional. They are copied # over before the updates are made to the existing files. NEW_FILES is # transactional due to the use of register_file_creation() # TODO: Verify permissions on configuration root... it is easier than # checking permissions on each of the relative directories and less error # prone. # TODO: Write a server protocol finder. Listen <port> <protocol> or # Protocol <protocol>. This can verify partial setups are correct # TODO: Add directives to sites-enabled... not sites-available. # sites-available doesn't allow immediate find_dir search even with save() class ApacheConfigurator(augeas_configurator.AugeasConfigurator): zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller) zope.interface.classProvides(interfaces.IPluginFactory) description = "Apache Web Server - Alpha" @classmethod def add_parser_arguments(cls, add): add("ctl", default=constants.CLI_DEFAULTS["ctl"], help="Path to the 'apache2ctl' binary, used for 'configtest', " "retrieving the Apache2 version number, and initialization " "parameters.") add("enmod", default=constants.CLI_DEFAULTS["enmod"], help="Path to the Apache 'a2enmod' binary.") add("dismod", default=constants.CLI_DEFAULTS["dismod"], help="Path to the Apache 'a2dismod' binary.") add("le-vhost-ext", default=constants.CLI_DEFAULTS["le_vhost_ext"], help="SSL vhost configuration extension.") add("server-root", default=constants.CLI_DEFAULTS["server_root"], help="Apache server root directory.") le_util.add_deprecated_argument(add, "init-script", 1) def __init__(self, *args, **kwargs): version = kwargs.pop("version", None) super(ApacheConfigurator, self).__init__(*args, **kwargs) self.assoc = dict() self._chall_out = set() self.parser = None self.version = version self.vhosts = None self._enhance_func = {"redirect": self._enable_redirect, "ensure-http-header": self._set_http_header} @property def mod_ssl_conf(self): return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST) def prepare(self): for exe in (self.conf("ctl"), self.conf("enmod"), self.conf("dismod")): if not le_util.exe_exists(exe): raise errors.NoInstallationError self.config_test() self.parser = parser.ApacheParser( self.aug, self.conf("server-root"), self.conf("ctl")) self.check_parsing_errors("httpd.aug") if self.version is None: 
self.version = self.get_version() if self.version < (2, 2): raise errors.NotSupportedError( "Apache Version %s not supported.", str(self.version)) self.vhosts = self.get_virtual_hosts() install_ssl_options_conf(self.mod_ssl_conf) def deploy_cert(self, domain, cert_path, key_path, chain_path=None, fullchain_path=None): vhost = self.choose_vhost(domain) self._clean_vhost(vhost) self.prepare_server_https("443") path = {"cert_path": self.parser.find_dir("SSLCertificateFile", None, vhost.path), "cert_key": self.parser.find_dir("SSLCertificateKeyFile", None, vhost.path)} if chain_path is not None: path["chain_path"] = self.parser.find_dir( "SSLCertificateChainFile", None, vhost.path) if not path["cert_path"] or not path["cert_key"]: logger.warn( "Cannot find a cert or key directive in %s. " "VirtualHost was not modified", vhost.path) # Presumably break here so that the virtualhost is not modified raise errors.PluginError( "Unable to find cert and/or key directives") logger.info("Deploying Certificate to VirtualHost %s", vhost.filep) logger.debug("Apache version is %s", ".".join(str(i) for i in self.version)) if self.version < (2, 4, 8) or (chain_path and not fullchain_path): # install SSLCertificateFile, SSLCertificateKeyFile, # and SSLCertificateChainFile directives set_cert_path = cert_path self.aug.set(path["cert_path"][-1], cert_path) self.aug.set(path["cert_key"][-1], key_path) if chain_path is not None: self.parser.add_dir(vhost.path, "SSLCertificateChainFile", chain_path) else: raise errors.PluginError("--chain-path is required for your version of Apache") else: if not fullchain_path: raise errors.PluginError("Please provide the --fullchain-path\ option pointing to your full chain file") set_cert_path = fullchain_path self.aug.set(path["cert_path"][-1], fullchain_path) self.aug.set(path["cert_key"][-1], key_path) # Save notes about the transaction that took place self.save_notes += ("Changed vhost at %s with addresses of %s\n" "\tSSLCertificateFile %s\n" "\tSSLCertificateKeyFile %s\n" % (vhost.filep, ", ".join(str(addr) for addr in vhost.addrs), set_cert_path, key_path)) if chain_path is not None: self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path # Make sure vhost is enabled if not vhost.enabled: self.enable_site(vhost) def choose_vhost(self, target_name, temp=False): # Allows for domain names to be associated with a virtual host if target_name in self.assoc: return self.assoc[target_name] # Try to find a reasonable vhost vhost = self._find_best_vhost(target_name) if vhost is not None: if temp: return vhost if not vhost.ssl: vhost = self.make_vhost_ssl(vhost) self.assoc[target_name] = vhost return vhost return self._choose_vhost_from_list(target_name, temp) def _choose_vhost_from_list(self, target_name, temp=False): # Select a vhost from a list vhost = display_ops.select_vhost(target_name, self.vhosts) if vhost is None: logger.error( "No vhost exists with servername or alias of: %s. " "No vhost was selected. Please specify servernames " "in the Apache config", target_name) raise errors.PluginError("No vhost selected") elif temp: return vhost elif not vhost.ssl: addrs = self._get_proposed_addrs(vhost, "443") # TODO: Conflicts is too conservative if not any(vhost.enabled and vhost.conflicts(addrs) for vhost in self.vhosts): vhost = self.make_vhost_ssl(vhost) else: logger.error( "The selected vhost would conflict with other HTTPS " "VirtualHosts within Apache. 
Please select another " "vhost or add ServerNames to your configuration.") raise errors.PluginError( "VirtualHost not able to be selected.") self.assoc[target_name] = vhost return vhost def _find_best_vhost(self, target_name): # Points 4 - Servername SSL # Points 3 - Address name with SSL # Points 2 - Servername no SSL # Points 1 - Address name with no SSL best_candidate = None best_points = 0 for vhost in self.vhosts: if vhost.modmacro is True: continue if target_name in vhost.get_names(): points = 2 elif any(addr.get_addr() == target_name for addr in vhost.addrs): points = 1 else: # No points given if names can't be found. # This gets hit but doesn't register continue # pragma: no cover if vhost.ssl: points += 2 if points > best_points: best_points = points best_candidate = vhost # No winners here... is there only one reasonable vhost? if best_candidate is None: # reasonable == Not all _default_ addrs vhosts = self._non_default_vhosts() # remove mod_macro hosts from reasonable vhosts reasonable_vhosts = [vh for vh in vhosts if vh.modmacro is False] if len(reasonable_vhosts) == 1: best_candidate = reasonable_vhosts[0] return best_candidate def _non_default_vhosts(self): return [vh for vh in self.vhosts if not all( addr.get_addr() == "_default_" for addr in vh.addrs )] def get_all_names(self): all_names = set() vhost_macro = [] for vhost in self.vhosts: all_names.update(vhost.get_names()) if vhost.modmacro: vhost_macro.append(vhost.filep) for addr in vhost.addrs: if common.hostname_regex.match(addr.get_addr()): all_names.add(addr.get_addr()) else: name = self.get_name_from_ip(addr) if name: all_names.add(name) if len(vhost_macro) > 0: zope.component.getUtility(interfaces.IDisplay).notification( "Apache mod_macro seems to be in use in file(s):\n{0}" "\n\nUnfortunately mod_macro is not yet supported".format( "\n ".join(vhost_macro))) return all_names def get_name_from_ip(self, addr): # pylint: disable=no-self-use # If it isn't a private IP, do a reverse DNS lookup if not common.private_ips_regex.match(addr.get_addr()): try: socket.inet_aton(addr.get_addr()) return socket.gethostbyaddr(addr.get_addr())[0] except (socket.error, socket.herror, socket.timeout): pass return "" def _add_servernames(self, host): # Take the final ServerName as each overrides the previous servername_match = self.parser.find_dir( "ServerName", None, start=host.path, exclude=False) serveralias_match = self.parser.find_dir( "ServerAlias", None, start=host.path, exclude=False) for alias in serveralias_match: serveralias = self.parser.get_arg(alias) if not host.modmacro: host.aliases.add(serveralias) if servername_match: # Get last ServerName as each overwrites the previous servername = self.parser.get_arg(servername_match[-1]) if not host.modmacro: host.name = servername def _create_vhost(self, path): addrs = set() args = self.aug.match(path + "/arg") for arg in args: addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg))) is_ssl = False if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False): is_ssl = True # "SSLEngine on" might be set outside of <VirtualHost> # Treat vhosts with port 443 as ssl vhosts for addr in addrs: if addr.get_port() == "443": is_ssl = True filename = get_file_path(path) is_enabled = self.is_site_enabled(filename) macro = False if "/macro/" in path.lower(): macro = True vhost = obj.VirtualHost(filename, path, addrs, is_ssl, is_enabled, modmacro=macro) self._add_servernames(vhost) return vhost # TODO: make "sites-available" a configurable directory def get_virtual_hosts(self): # 
Search sites-available, httpd.conf for possible virtual hosts paths = self.aug.match( ("/files%s/sites-available//*[label()=~regexp('%s')]" % (self.parser.root, parser.case_i("VirtualHost")))) vhs = [] for path in paths: vhs.append(self._create_vhost(path)) return vhs def is_name_vhost(self, target_addr): # Mixed and matched wildcard NameVirtualHost with VirtualHost # behavior is undefined. Make sure that an exact match exists # search for NameVirtualHost directive for ip_addr # note ip_addr can be FQDN although Apache does not recommend it return (self.version >= (2, 4) or self.parser.find_dir("NameVirtualHost", str(target_addr))) def add_name_vhost(self, addr): loc = parser.get_aug_path(self.parser.loc["name"]) if addr.get_port() == "443": path = self.parser.add_dir_to_ifmodssl( loc, "NameVirtualHost", [str(addr)]) else: path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)]) msg = ("Setting %s to be NameBasedVirtualHost\n" "\tDirective added to %s\n" % (addr, path)) logger.debug(msg) self.save_notes += msg def prepare_server_https(self, port, temp=False): if "ssl_module" not in self.parser.modules: self.enable_mod("ssl", temp=temp) # Check for Listen <port> # Note: This could be made to also look for ip:443 combo listens = [self.parser.get_arg(x).split()[0] for x in self.parser.find_dir("Listen")] # In case no Listens are set (which really is a broken apache config) if not listens: listens = ["80"] for listen in listens: # For any listen statement, check if the machine also listens on Port 443. # If not, add such a listen statement. if len(listen.split(":")) == 1: # Its listening to all interfaces if port not in listens: if port == "443": args = [port] else: # Non-standard ports should specify https protocol args = [port, "https"] self.parser.add_dir_to_ifmodssl( parser.get_aug_path( self.parser.loc["listen"]), "Listen", args) self.save_notes += "Added Listen %s directive to %s\n" % ( port, self.parser.loc["listen"]) listens.append(port) else: # The Listen statement specifies an ip _, ip = listen[::-1].split(":", 1) ip = ip[::-1] if "%s:%s" % (ip, port) not in listens: if port == "443": args = ["%s:%s" % (ip, port)] else: # Non-standard ports should specify https protocol args = ["%s:%s" % (ip, port), "https"] self.parser.add_dir_to_ifmodssl( parser.get_aug_path( self.parser.loc["listen"]), "Listen", args) self.save_notes += "Added Listen %s:%s directive to %s\n" % ( ip, port, self.parser.loc["listen"]) listens.append("%s:%s" % (ip, port)) def make_addrs_sni_ready(self, addrs): # Version 2.4 and later are automatically SNI ready. 
if self.version >= (2, 4): return for addr in addrs: if not self.is_name_vhost(addr): logger.debug("Setting VirtualHost at %s to be a name " "based virtual host", addr) self.add_name_vhost(addr) def make_vhost_ssl(self, nonssl_vhost): # pylint: disable=too-many-locals avail_fp = nonssl_vhost.filep ssl_fp = self._get_ssl_vhost_path(avail_fp) self._copy_create_ssl_vhost_skeleton(avail_fp, ssl_fp) # Reload augeas to take into account the new vhost self.aug.load() # Get Vhost augeas path for new vhost vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" % (ssl_fp, parser.case_i("VirtualHost"))) if len(vh_p) != 1: logger.error("Error: should only be one vhost in %s", avail_fp) raise errors.PluginError("Currently, we only support " "configurations with one vhost per file") else: # This simplifies the process vh_p = vh_p[0] # Update Addresses self._update_ssl_vhosts_addrs(vh_p) # Add directives self._add_dummy_ssl_directives(vh_p) # Log actions and create save notes logger.info("Created an SSL vhost at %s", ssl_fp) self.save_notes += "Created ssl vhost at %s\n" % ssl_fp self.save() # We know the length is one because of the assertion above # Create the Vhost object ssl_vhost = self._create_vhost(vh_p) self.vhosts.append(ssl_vhost) # NOTE: Searches through Augeas seem to ruin changes to directives # The configuration must also be saved before being searched # for the new directives; For these reasons... this is tacked # on after fully creating the new vhost # Now check if addresses need to be added as NameBasedVhost addrs # This is for compliance with versions of Apache < 2.4 self._add_name_vhost_if_necessary(ssl_vhost) return ssl_vhost def _get_ssl_vhost_path(self, non_ssl_vh_fp): # Get filepath of new ssl_vhost if non_ssl_vh_fp.endswith(".conf"): return non_ssl_vh_fp[:-(len(".conf"))] + self.conf("le_vhost_ext") else: return non_ssl_vh_fp + self.conf("le_vhost_ext") def _copy_create_ssl_vhost_skeleton(self, avail_fp, ssl_fp): # First register the creation so that it is properly removed if # configuration is rolled back self.reverter.register_file_creation(False, ssl_fp) try: with open(avail_fp, "r") as orig_file: with open(ssl_fp, "w") as new_file: new_file.write("<IfModule mod_ssl.c>\n") for line in orig_file: new_file.write(line) new_file.write("</IfModule>\n") except IOError: logger.fatal("Error writing/reading to file in make_vhost_ssl") raise errors.PluginError("Unable to write/read in make_vhost_ssl") def _update_ssl_vhosts_addrs(self, vh_path): ssl_addrs = set() ssl_addr_p = self.aug.match(vh_path + "/arg") for addr in ssl_addr_p: old_addr = obj.Addr.fromstring( str(self.parser.get_arg(addr))) ssl_addr = old_addr.get_addr_obj("443") self.aug.set(addr, str(ssl_addr)) ssl_addrs.add(ssl_addr) return ssl_addrs def _clean_vhost(self, vhost): # remove duplicated or conflicting ssl directives self._deduplicate_directives(vhost.path, ["SSLCertificateFile", "SSLCertificateKeyFile"]) # remove all problematic directives self._remove_directives(vhost.path, ["SSLCertificateChainFile"]) def _deduplicate_directives(self, vh_path, directives): for directive in directives: while len(self.parser.find_dir(directive, None, vh_path, False)) > 1: directive_path = self.parser.find_dir(directive, None, vh_path, False) self.aug.remove(re.sub(r"/\w*$", "", directive_path[0])) def _remove_directives(self, vh_path, directives): for directive in directives: while len(self.parser.find_dir(directive, None, vh_path, False)) > 0: directive_path = self.parser.find_dir(directive, None, vh_path, False) 
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0])) def _add_dummy_ssl_directives(self, vh_path): self.parser.add_dir(vh_path, "SSLCertificateFile", "insert_cert_file_path") self.parser.add_dir(vh_path, "SSLCertificateKeyFile", "insert_key_file_path") self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf) def _add_name_vhost_if_necessary(self, vhost): need_to_save = False # See if the exact address appears in any other vhost # Remember 1.1.1.1:* == 1.1.1.1 -> hence any() for addr in vhost.addrs: for test_vh in self.vhosts: if (vhost.filep != test_vh.filep and any(test_addr == addr for test_addr in test_vh.addrs) and not self.is_name_vhost(addr)): self.add_name_vhost(addr) logger.info("Enabling NameVirtualHosts on %s", addr) need_to_save = True if need_to_save: self.save() ############################################################################ # Enhancements ############################################################################ def supported_enhancements(self): # pylint: disable=no-self-use return ["redirect", "ensure-http-header"] def enhance(self, domain, enhancement, options=None): try: func = self._enhance_func[enhancement] except KeyError: raise errors.PluginError( "Unsupported enhancement: {0}".format(enhancement)) try: func(self.choose_vhost(domain), options) except errors.PluginError: logger.warn("Failed %s for %s", enhancement, domain) raise def _set_http_header(self, ssl_vhost, header_substring): if "headers_module" not in self.parser.modules: self.enable_mod("headers") # Check if selected header is already set self._verify_no_matching_http_header(ssl_vhost, header_substring) # Add directives to server self.parser.add_dir(ssl_vhost.path, "Header", constants.HEADER_ARGS[header_substring]) self.save_notes += ("Adding %s header to ssl vhost in %s\n" % (header_substring, ssl_vhost.filep)) self.save() logger.info("Adding %s header to ssl vhost in %s", header_substring, ssl_vhost.filep) def _verify_no_matching_http_header(self, ssl_vhost, header_substring): header_path = self.parser.find_dir("Header", None, start=ssl_vhost.path) if header_path: # "Existing Header directive for virtualhost" pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower()) for match in header_path: if re.search(pat, self.aug.get(match).lower()): raise errors.PluginEnhancementAlreadyPresent( "Existing %s header" % (header_substring)) def _enable_redirect(self, ssl_vhost, unused_options): if "rewrite_module" not in self.parser.modules: self.enable_mod("rewrite") general_vh = self._get_http_vhost(ssl_vhost) if general_vh is None: # Add virtual_server with redirect logger.debug("Did not find http version of ssl virtual host " "attempting to create") redirect_addrs = self._get_proposed_addrs(ssl_vhost) for vhost in self.vhosts: if vhost.enabled and vhost.conflicts(redirect_addrs): raise errors.PluginError( "Unable to find corresponding HTTP vhost; " "Unable to create one as intended addresses conflict; " "Current configuration does not support automated " "redirection") self._create_redirect_vhost(ssl_vhost) else: # Check if LetsEncrypt redirection already exists self._verify_no_letsencrypt_redirect(general_vh) # Note: if code flow gets here it means we didn't find the exact # letsencrypt RewriteRule config for redirection. Finding # another RewriteRule is likely to be fine in most or all cases, # but redirect loops are possible in very obscure cases; see #1620 # for reasoning. 
if self._is_rewrite_exists(general_vh): logger.warn("Added an HTTP->HTTPS rewrite in addition to " "other RewriteRules; you may wish to check for " "overall consistency.") # Add directives to server # Note: These are not immediately searchable in sites-enabled # even with save() and load() if not self._is_rewrite_engine_on(general_vh): self.parser.add_dir(general_vh.path, "RewriteEngine", "on") if self.get_version() >= (2, 3, 9): self.parser.add_dir(general_vh.path, "RewriteRule", constants.REWRITE_HTTPS_ARGS_WITH_END) else: self.parser.add_dir(general_vh.path, "RewriteRule", constants.REWRITE_HTTPS_ARGS) self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" % (general_vh.filep, ssl_vhost.filep)) self.save() logger.info("Redirecting vhost in %s to ssl vhost in %s", general_vh.filep, ssl_vhost.filep) def _verify_no_letsencrypt_redirect(self, vhost): rewrite_path = self.parser.find_dir( "RewriteRule", None, start=vhost.path) # There can be other RewriteRule directive lines in vhost config. # rewrite_args_dict keys are directive ids and the corresponding value # for each is a list of arguments to that directive. rewrite_args_dict = defaultdict(list) pat = r'.*(directive\[\d+\]).*' for match in rewrite_path: m = re.match(pat, match) if m: dir_id = m.group(1) rewrite_args_dict[dir_id].append(match) if rewrite_args_dict: redirect_args = [constants.REWRITE_HTTPS_ARGS, constants.REWRITE_HTTPS_ARGS_WITH_END] for matches in rewrite_args_dict.values(): if [self.aug.get(x) for x in matches] in redirect_args: raise errors.PluginEnhancementAlreadyPresent( "Let's Encrypt has already enabled redirection") def _is_rewrite_exists(self, vhost): rewrite_path = self.parser.find_dir( "RewriteRule", None, start=vhost.path) return bool(rewrite_path) def _is_rewrite_engine_on(self, vhost): rewrite_engine_path = self.parser.find_dir("RewriteEngine", "on", start=vhost.path) if rewrite_engine_path: return self.parser.get_arg(rewrite_engine_path[0]) return False def _create_redirect_vhost(self, ssl_vhost): text = self._get_redirect_config_str(ssl_vhost) redirect_filepath = self._write_out_redirect(ssl_vhost, text) self.aug.load() # Make a new vhost data structure and add it to the lists new_vhost = self._create_vhost(parser.get_aug_path(redirect_filepath)) self.vhosts.append(new_vhost) # Finally create documentation for the change self.save_notes += ("Created a port 80 vhost, %s, for redirection to " "ssl vhost %s\n" % (new_vhost.filep, ssl_vhost.filep)) def _get_redirect_config_str(self, ssl_vhost): # get servernames and serveraliases serveralias = "" servername = "" if ssl_vhost.name is not None: servername = "ServerName " + ssl_vhost.name if ssl_vhost.aliases: serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases) rewrite_rule_args = [] if self.get_version() >= (2, 3, 9): rewrite_rule_args = constants.REWRITE_HTTPS_ARGS_WITH_END else: rewrite_rule_args = constants.REWRITE_HTTPS_ARGS return ("<VirtualHost %s>\n" "%s \n" "%s \n" "ServerSignature Off\n" "\n" "RewriteEngine On\n" "RewriteRule %s\n" "\n" "ErrorLog /var/log/apache2/redirect.error.log\n" "LogLevel warn\n" "</VirtualHost>\n" % (" ".join(str(addr) for addr in self._get_proposed_addrs(ssl_vhost)), servername, serveralias, " ".join(rewrite_rule_args))) def _write_out_redirect(self, ssl_vhost, text): # This is the default name redirect_filename = "le-redirect.conf" # See if a more appropriate name can be applied if ssl_vhost.name is not None: # make sure servername doesn't exceed filename length restriction if len(ssl_vhost.name) < (255 - 
(len(redirect_filename) + 1)): redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name redirect_filepath = os.path.join( self.parser.root, "sites-available", redirect_filename) # Register the new file that will be created # Note: always register the creation before writing to ensure file will # be removed in case of unexpected program exit self.reverter.register_file_creation(False, redirect_filepath) # Write out file with open(redirect_filepath, "w") as redirect_file: redirect_file.write(text) logger.info("Created redirect file: %s", redirect_filename) return redirect_filepath def _get_http_vhost(self, ssl_vhost): # First candidate vhosts filter candidate_http_vhs = [ vhost for vhost in self.vhosts if not vhost.ssl ] # Second filter - check addresses for http_vh in candidate_http_vhs: if http_vh.same_server(ssl_vhost): return http_vh return None def _get_proposed_addrs(self, vhost, port="80"): # pylint: disable=no-self-use redirects = set() for addr in vhost.addrs: redirects.add(addr.get_addr_obj(port)) return redirects def get_all_certs_keys(self): c_k = set() for vhost in self.vhosts: if vhost.ssl: cert_path = self.parser.find_dir( "SSLCertificateFile", None, start=vhost.path, exclude=False) key_path = self.parser.find_dir( "SSLCertificateKeyFile", None, start=vhost.path, exclude=False) if cert_path and key_path: cert = os.path.abspath(self.parser.get_arg(cert_path[-1])) key = os.path.abspath(self.parser.get_arg(key_path[-1])) c_k.add((cert, key, get_file_path(cert_path[-1]))) else: logger.warning( "Invalid VirtualHost configuration - %s", vhost.filep) return c_k def is_site_enabled(self, avail_fp): enabled_dir = os.path.join(self.parser.root, "sites-enabled") for entry in os.listdir(enabled_dir): try: if filecmp.cmp(avail_fp, os.path.join(enabled_dir, entry)): return True except OSError: pass return False def enable_site(self, vhost): if self.is_site_enabled(vhost.filep): return if "/sites-available/" in vhost.filep: enabled_path = ("%s/sites-enabled/%s" % (self.parser.root, os.path.basename(vhost.filep))) self.reverter.register_file_creation(False, enabled_path) os.symlink(vhost.filep, enabled_path) vhost.enabled = True logger.info("Enabling available site: %s", vhost.filep) self.save_notes += "Enabled site %s\n" % vhost.filep else: raise errors.NotSupportedError( "Unsupported filesystem layout. " "sites-available/enabled expected.") def enable_mod(self, mod_name, temp=False): # Support Debian specific setup avail_path = os.path.join(self.parser.root, "mods-available") enabled_path = os.path.join(self.parser.root, "mods-enabled") if not os.path.isdir(avail_path) or not os.path.isdir(enabled_path): raise errors.NotSupportedError( "Unsupported directory layout. You may try to enable mod %s " "and try again." % mod_name) deps = _get_mod_deps(mod_name) # Enable all dependencies for dep in deps: if (dep + "_module") not in self.parser.modules: self._enable_mod_debian(dep, temp) self._add_parser_mod(dep) note = "Enabled dependency of %s module - %s" % (mod_name, dep) if not temp: self.save_notes += note + os.linesep logger.debug(note) # Enable actual module self._enable_mod_debian(mod_name, temp) self._add_parser_mod(mod_name) if not temp: self.save_notes += "Enabled %s module in Apache\n" % mod_name logger.info("Enabled Apache %s module", mod_name) # Modules can enable additional config files. Variables may be defined # within these new configuration sections. # Reload is not necessary as DUMP_RUN_CFG uses latest config. 
self.parser.update_runtime_variables(self.conf("ctl")) def _add_parser_mod(self, mod_name): self.parser.modules.add(mod_name + "_module") self.parser.modules.add("mod_" + mod_name + ".c") def _enable_mod_debian(self, mod_name, temp): # Generate reversal command. # Try to be safe here... check that we can probably reverse before # applying enmod command if not le_util.exe_exists(self.conf("dismod")): raise errors.MisconfigurationError( "Unable to find a2dismod, please make sure a2enmod and " "a2dismod are configured correctly for letsencrypt.") self.reverter.register_undo_command( temp, [self.conf("dismod"), mod_name]) le_util.run_script([self.conf("enmod"), mod_name]) def restart(self): self.config_test() self._reload() def _reload(self): try: le_util.run_script([self.conf("ctl"), "-k", "graceful"]) except errors.SubprocessError as err: raise errors.MisconfigurationError(str(err)) def config_test(self): # pylint: disable=no-self-use try: le_util.run_script([self.conf("ctl"), "configtest"]) except errors.SubprocessError as err: raise errors.MisconfigurationError(str(err)) def get_version(self): try: stdout, _ = le_util.run_script([self.conf("ctl"), "-v"]) except errors.SubprocessError: raise errors.PluginError( "Unable to run %s -v" % self.conf("ctl")) regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE) matches = regex.findall(stdout) if len(matches) != 1: raise errors.PluginError("Unable to find Apache version") return tuple([int(i) for i in matches[0].split(".")]) def more_info(self): return ( "Configures Apache to authenticate and install HTTPS.{0}" "Server root: {root}{0}" "Version: {version}".format( os.linesep, root=self.parser.loc["root"], version=".".join(str(i) for i in self.version)) ) ########################################################################### # Challenges Section ########################################################################### def get_chall_pref(self, unused_domain): # pylint: disable=no-self-use return [challenges.TLSSNI01] def perform(self, achalls): self._chall_out.update(achalls) responses = [None] * len(achalls) chall_doer = tls_sni_01.ApacheTlsSni01(self) for i, achall in enumerate(achalls): # Currently also have chall_doer hold associated index of the # challenge. This helps to put all of the responses back together # when they are all complete. chall_doer.add_chall(achall, i) sni_response = chall_doer.perform() if sni_response: # Must reload in order to activate the challenges. # Handled here because we may be able to load up other challenge # types self.restart() # TODO: Remove this dirty hack. We need to determine a reliable way # of identifying when the new configuration is being used. time.sleep(3) # Go through all of the challenges and assign them to the proper # place in the responses return value. All responses must be in the # same order as the original challenges. for i, resp in enumerate(sni_response): responses[chall_doer.indices[i]] = resp return responses def cleanup(self, achalls): self._chall_out.difference_update(achalls) # If all of the challenges have been finished, clean up everything if not self._chall_out: self.revert_challenge_config() self.restart() self.parser.init_modules() def _get_mod_deps(mod_name): deps = { "ssl": ["setenvif", "mime", "socache_shmcb"] } return deps.get(mod_name, []) def get_file_path(vhost_path): # Strip off /files avail_fp = vhost_path[6:] # This can be optimized... 
while True: # Cast all to lowercase to be case insensitive find_if = avail_fp.lower().find("/ifmodule") if find_if != -1: avail_fp = avail_fp[:find_if] continue find_vh = avail_fp.lower().find("/virtualhost") if find_vh != -1: avail_fp = avail_fp[:find_vh] continue find_macro = avail_fp.lower().find("/macro") if find_macro != -1: avail_fp = avail_fp[:find_macro] continue break return avail_fp def install_ssl_options_conf(options_ssl): # XXX if we ever try to enforce a local privilege boundary (eg, running # letsencrypt for unprivileged users via setuid), this function will need # to be modified. # XXX if the user is in security-autoupdate mode, we should be willing to # overwrite the options_ssl file at least if it's unmodified: # https://github.com/letsencrypt/letsencrypt/issues/1123 # Check to make sure options-ssl.conf is installed if not os.path.isfile(options_ssl): shutil.copyfile(constants.MOD_SSL_CONF_SRC, options_ssl)
true
true
f724930ad6413116333562d13934d7c53d5d2f11
1,084
py
Python
geocode-run.py
kmcurry/story-where
8bb1e9cc73f4f6aefb8edf40137ab55d1ecde3f6
[ "MIT" ]
null
null
null
geocode-run.py
kmcurry/story-where
8bb1e9cc73f4f6aefb8edf40137ab55d1ecde3f6
[ "MIT" ]
25
2020-01-02T23:55:30.000Z
2020-03-15T21:25:16.000Z
geocode-run.py
kmcurry/story-where
8bb1e9cc73f4f6aefb8edf40137ab55d1ecde3f6
[ "MIT" ]
null
null
null
from utils.db import Database
from datetime import datetime
import hashlib
import json
import os

import requests

url = 'https://maps.googleapis.com/maps/api/geocode/json'
api_key = os.environ['GOOGLE_MAPS_API_KEY']
params = {'key': api_key, 'address': 'Mountain View, CA'}

print("Downloading entities")
db = Database()
entities = db.get_entities_to_geocode()
print("Entities downloaded", len(entities))

for entity in entities:
    print(entity.name)

    h = hashlib.md5(bytes(entity.name, encoding='utf-8')).hexdigest()
    print(h)

    outfile = ".\\geocode_results\\" + h + ".json"
    print(outfile)

    if os.path.exists(outfile):
        print("Skipping because Geocode results already exist")
        continue

    if not os.path.exists(os.path.dirname(outfile)):
        os.makedirs(os.path.dirname(outfile))

    params['address'] = entity.name
    r = requests.get(url, params=params)

    results = r.json()
    results['address'] = entity.name
    results['collected_utc_date'] = str(datetime.utcnow())

    with open(outfile, 'w') as f:
        json.dump(results, f)
26.439024
69
0.685424
from utils.db import Database
from datetime import datetime
import hashlib
import json
import os

import requests

url = 'https://maps.googleapis.com/maps/api/geocode/json'
api_key = os.environ['GOOGLE_MAPS_API_KEY']
params = {'key': api_key, 'address': 'Mountain View, CA'}

print("Downloading entities")
db = Database()
entities = db.get_entities_to_geocode()
print("Entities downloaded", len(entities))

for entity in entities:
    print(entity.name)

    h = hashlib.md5(bytes(entity.name, encoding='utf-8')).hexdigest()
    print(h)

    outfile = ".\\geocode_results\\" + h + ".json"
    print(outfile)

    if os.path.exists(outfile):
        print("Skipping because Geocode results already exist")
        continue

    if not os.path.exists(os.path.dirname(outfile)):
        os.makedirs(os.path.dirname(outfile))

    params['address'] = entity.name
    r = requests.get(url, params=params)

    results = r.json()
    results['address'] = entity.name
    results['collected_utc_date'] = str(datetime.utcnow())

    with open(outfile, 'w') as f:
        json.dump(results, f)
true
true
f724934d8bc8300e61211e06181ef9feb6293b80
4,506
py
Python
generate.py
djohansson/volk
a6418b5ea289e9130429717654571cf89d603fdc
[ "MIT", "Unlicense" ]
1
2018-12-23T11:04:22.000Z
2018-12-23T11:04:22.000Z
Common_3/ThirdParty/OpenSource/volk/generate.py
cmkandpane/The-Forge
63a3c3038c1b48184b207ebeed5a5548d52648e6
[ "Apache-2.0" ]
null
null
null
Common_3/ThirdParty/OpenSource/volk/generate.py
cmkandpane/The-Forge
63a3c3038c1b48184b207ebeed5a5548d52648e6
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python3

from collections import OrderedDict
import sys
import urllib
import xml.etree.ElementTree as etree
import urllib.request

def parse_xml(path):
    file = urllib.request.urlopen(path) if path.startswith("http") else open(path, 'r')
    with file:
        tree = etree.parse(file)
        return tree

def patch_file(path, blocks):
    result = []
    block = None

    with open(path, 'r') as file:
        for line in file.readlines():
            if block:
                if line == block:
                    result.append(line)
                    block = None
            else:
                result.append(line)
                if line.strip().startswith('/* VOLK_GENERATE_'):
                    block = line
                    result.append(blocks[line.strip()[17:-3]])

    with open(path, 'w') as file:
        for line in result:
            file.write(line)

def is_descendant_type(types, name, base):
    if name == base:
        return True
    type = types.get(name)
    if not type:
        return False
    parents = type.get('parent')
    if not parents:
        return False
    return any([is_descendant_type(types, parent, base) for parent in parents.split(',')])

def defined(key):
    return 'defined(' + key + ')'

if __name__ == "__main__":
    specpath = "https://raw.githubusercontent.com/KhronosGroup/Vulkan-Docs/master/xml/vk.xml"

    if len(sys.argv) > 1:
        specpath = sys.argv[1]

    spec = parse_xml(specpath)

    block_keys = ('DEVICE_TABLE', 'PROTOTYPES_H', 'PROTOTYPES_C', 'LOAD_LOADER', 'LOAD_INSTANCE', 'LOAD_DEVICE', 'LOAD_DEVICE_TABLE')

    blocks = {}

    version = spec.find('types/type[name="VK_HEADER_VERSION"]')
    blocks['VERSION'] = '#define VOLK_HEADER_VERSION ' + version.find('name').tail.strip() + '\n'

    command_groups = OrderedDict()

    for feature in spec.findall('feature'):
        key = defined(feature.get('name'))
        cmdrefs = feature.findall('require/command')
        command_groups[key] = [cmdref.get('name') for cmdref in cmdrefs]

    for ext in sorted(spec.findall('extensions/extension'), key=lambda ext: ext.get('name')):
        name = ext.get('name')
        for req in ext.findall('require'):
            key = defined(name)
            if req.get('feature'):
                key += ' && ' + defined(req.get('feature'))
            if req.get('extension'):
                key += ' && ' + defined(req.get('extension'))
            cmdrefs = req.findall('command')
            command_groups.setdefault(key, []).extend([cmdref.get('name') for cmdref in cmdrefs])

    commands_to_groups = OrderedDict()

    for (group, cmdnames) in command_groups.items():
        for name in cmdnames:
            commands_to_groups.setdefault(name, []).append(group)

    for (group, cmdnames) in command_groups.items():
        command_groups[group] = [name for name in cmdnames if len(commands_to_groups[name]) == 1]

    for (name, groups) in commands_to_groups.items():
        if len(groups) == 1:
            continue
        key = ' || '.join(['(' + g + ')' for g in groups])
        command_groups.setdefault(key, []).append(name)

    commands = {}

    for cmd in spec.findall('commands/command'):
        if not cmd.get('alias'):
            name = cmd.findtext('proto/name')
            commands[name] = cmd

    for cmd in spec.findall('commands/command'):
        if cmd.get('alias'):
            name = cmd.get('name')
            commands[name] = commands[cmd.get('alias')]

    types = {}

    for type in spec.findall('types/type'):
        name = type.findtext('name')
        if name:
            types[name] = type

    for key in block_keys:
        blocks[key] = ''

    for (group, cmdnames) in command_groups.items():
        ifdef = '#if ' + group + '\n'

        for key in block_keys:
            blocks[key] += ifdef

        for name in sorted(cmdnames):
            cmd = commands[name]
            type = cmd.findtext('param[1]/type')

            if name == 'vkGetInstanceProcAddr':
                type = ''
            if name == 'vkGetDeviceProcAddr':
                type = 'VkInstance'

            if is_descendant_type(types, type, 'VkDevice'):
                blocks['LOAD_DEVICE'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
                blocks['DEVICE_TABLE'] += '\tPFN_' + name + ' ' + name + ';\n'
                blocks['LOAD_DEVICE_TABLE'] += '\ttable->' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
            elif is_descendant_type(types, type, 'VkInstance'):
                blocks['LOAD_INSTANCE'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
            elif type != '':
                blocks['LOAD_LOADER'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'

            blocks['PROTOTYPES_H'] += 'extern PFN_' + name + ' ' + name + ';\n'
            blocks['PROTOTYPES_C'] += 'PFN_' + name + ' ' + name + ';\n'

        for key in block_keys:
            if blocks[key].endswith(ifdef):
                blocks[key] = blocks[key][:-len(ifdef)]
            else:
                blocks[key] += '#endif /* ' + group + ' */\n'

    patch_file('volk.h', blocks)
    patch_file('volk.c', blocks)
29.25974
130
0.640923
from collections import OrderedDict
import sys
import urllib
import xml.etree.ElementTree as etree
import urllib.request

def parse_xml(path):
    file = urllib.request.urlopen(path) if path.startswith("http") else open(path, 'r')
    with file:
        tree = etree.parse(file)
        return tree

def patch_file(path, blocks):
    result = []
    block = None

    with open(path, 'r') as file:
        for line in file.readlines():
            if block:
                if line == block:
                    result.append(line)
                    block = None
            else:
                result.append(line)
                if line.strip().startswith('/* VOLK_GENERATE_'):
                    block = line
                    result.append(blocks[line.strip()[17:-3]])

    with open(path, 'w') as file:
        for line in result:
            file.write(line)

def is_descendant_type(types, name, base):
    if name == base:
        return True
    type = types.get(name)
    if not type:
        return False
    parents = type.get('parent')
    if not parents:
        return False
    return any([is_descendant_type(types, parent, base) for parent in parents.split(',')])

def defined(key):
    return 'defined(' + key + ')'

if __name__ == "__main__":
    specpath = "https://raw.githubusercontent.com/KhronosGroup/Vulkan-Docs/master/xml/vk.xml"

    if len(sys.argv) > 1:
        specpath = sys.argv[1]

    spec = parse_xml(specpath)

    block_keys = ('DEVICE_TABLE', 'PROTOTYPES_H', 'PROTOTYPES_C', 'LOAD_LOADER', 'LOAD_INSTANCE', 'LOAD_DEVICE', 'LOAD_DEVICE_TABLE')

    blocks = {}

    version = spec.find('types/type[name="VK_HEADER_VERSION"]')
    blocks['VERSION'] = '#define VOLK_HEADER_VERSION ' + version.find('name').tail.strip() + '\n'

    command_groups = OrderedDict()

    for feature in spec.findall('feature'):
        key = defined(feature.get('name'))
        cmdrefs = feature.findall('require/command')
        command_groups[key] = [cmdref.get('name') for cmdref in cmdrefs]

    for ext in sorted(spec.findall('extensions/extension'), key=lambda ext: ext.get('name')):
        name = ext.get('name')
        for req in ext.findall('require'):
            key = defined(name)
            if req.get('feature'):
                key += ' && ' + defined(req.get('feature'))
            if req.get('extension'):
                key += ' && ' + defined(req.get('extension'))
            cmdrefs = req.findall('command')
            command_groups.setdefault(key, []).extend([cmdref.get('name') for cmdref in cmdrefs])

    commands_to_groups = OrderedDict()

    for (group, cmdnames) in command_groups.items():
        for name in cmdnames:
            commands_to_groups.setdefault(name, []).append(group)

    for (group, cmdnames) in command_groups.items():
        command_groups[group] = [name for name in cmdnames if len(commands_to_groups[name]) == 1]

    for (name, groups) in commands_to_groups.items():
        if len(groups) == 1:
            continue
        key = ' || '.join(['(' + g + ')' for g in groups])
        command_groups.setdefault(key, []).append(name)

    commands = {}

    for cmd in spec.findall('commands/command'):
        if not cmd.get('alias'):
            name = cmd.findtext('proto/name')
            commands[name] = cmd

    for cmd in spec.findall('commands/command'):
        if cmd.get('alias'):
            name = cmd.get('name')
            commands[name] = commands[cmd.get('alias')]

    types = {}

    for type in spec.findall('types/type'):
        name = type.findtext('name')
        if name:
            types[name] = type

    for key in block_keys:
        blocks[key] = ''

    for (group, cmdnames) in command_groups.items():
        ifdef = '#if ' + group + '\n'

        for key in block_keys:
            blocks[key] += ifdef

        for name in sorted(cmdnames):
            cmd = commands[name]
            type = cmd.findtext('param[1]/type')

            if name == 'vkGetInstanceProcAddr':
                type = ''
            if name == 'vkGetDeviceProcAddr':
                type = 'VkInstance'

            if is_descendant_type(types, type, 'VkDevice'):
                blocks['LOAD_DEVICE'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
                blocks['DEVICE_TABLE'] += '\tPFN_' + name + ' ' + name + ';\n'
                blocks['LOAD_DEVICE_TABLE'] += '\ttable->' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
            elif is_descendant_type(types, type, 'VkInstance'):
                blocks['LOAD_INSTANCE'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
            elif type != '':
                blocks['LOAD_LOADER'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'

            blocks['PROTOTYPES_H'] += 'extern PFN_' + name + ' ' + name + ';\n'
            blocks['PROTOTYPES_C'] += 'PFN_' + name + ' ' + name + ';\n'

        for key in block_keys:
            if blocks[key].endswith(ifdef):
                blocks[key] = blocks[key][:-len(ifdef)]
            else:
                blocks[key] += '#endif /* ' + group + ' */\n'

    patch_file('volk.h', blocks)
    patch_file('volk.c', blocks)
true
true
f7249360b251c64a2879289eb9c28f008a4fc9e6
8,859
py
Python
packages/dcos-integration-test/extra/test_legacy_user_management.py
wolf31o2/dcos
113b8abacfd6d517594f329b621aaf4641b535e7
[ "Apache-2.0" ]
null
null
null
packages/dcos-integration-test/extra/test_legacy_user_management.py
wolf31o2/dcos
113b8abacfd6d517594f329b621aaf4641b535e7
[ "Apache-2.0" ]
null
null
null
packages/dcos-integration-test/extra/test_legacy_user_management.py
wolf31o2/dcos
113b8abacfd6d517594f329b621aaf4641b535e7
[ "Apache-2.0" ]
null
null
null
""" A collection of tests covering legacy user management in DC/OS. Legacy user management is considered to be the user management API offered by `dcos-oauth` up to DC/OS release 1.12. Assume that access control is activated in Master Admin Router (could be disabled with `oauth_enabled`) and therefore authenticate individual HTTP dcos_api_session. One aspect of legacy DC/OS user management is that once authenticated a user can add other users. Unauthenticated HTTP dcos_api_session are rejected by Master Admin Router and user management fails (this is the coarse-grained authorization model of (open) DC/OS). Here, test that unauthenticated HTTP dcos_api_session cannot manage users. However, do not test that newly added users can add other users: in this test suite we are limited to having authentication state for just a single user available. This is why we can test managing other users only from that first user's point of view. That is, we can not test that a user (e.g. user2) which was added by the first user (user1) can add another user (user3). """ import logging import uuid import pytest from dcos_test_utils import dcos_cli from test_helpers import get_expanded_config __maintainer__ = 'jgehrcke' __contact__ = 'security-team@mesosphere.io' log = logging.getLogger(__name__) # Skip entire module in downstream integration tests. @pytest.fixture(autouse=True) def skip_in_downstream(): expanded_config = get_expanded_config() if 'security' in expanded_config: pytest.skip( 'Skip upstream-specific user management tests', allow_module_level=True ) def get_users(apisession): r = apisession.get('/acs/api/v1/users') r.raise_for_status() users = {u['uid']: u for u in r.json()['array']} return users def delete_user(apisession, uid): r = apisession.delete('/acs/api/v1/users/%s' % (uid, )) r.raise_for_status() assert r.status_code == 204 @pytest.fixture() def remove_users_added_by_test(dcos_api_session): users_before = set(get_users(dcos_api_session)) log.info('remove_users_added_by_test pre test: users are %s', users_before) try: yield finally: users_after = set(get_users(dcos_api_session)) new_uids = users_after - users_before for uid in new_uids: log.info('remove_users_added_by_test post test: remove `%s`', uid) delete_user(dcos_api_session, uid) def test_users_get(dcos_api_session): users = get_users(dcos_api_session) assert users required_keys = ('uid', 'description') for userdict in users.values(): for k in required_keys: assert k in userdict def test_user_put_no_email_uid_empty_body(dcos_api_session): # This test mainly demonstrates a subtle API difference between dcos-oauth # (legacy) and Bouncer. r = dcos_api_session.put('/acs/api/v1/users/user1') # This is the old behavior in dcos-oauth. # assert r.status_code == 500 # assert 'invalid email' in r.text # With Bouncer non-email uids are valid, and the request fails as of the # missing request body. assert r.status_code == 400 assert 'Request has bad Content-Type or lacks JSON data' in r.text @pytest.mark.usefixtures('remove_users_added_by_test') def test_legacy_user_creation_with_empty_json_doc(dcos_api_session): # Legacy HTTP clients built for dcos-oauth such as the web UI (up to DC/OS # 1.12) might insert users in the following way: uid appears to be an email # address, and the JSON document in the request body does not provide a # `public_key` or a `password` property (indicating local user), or is # empty. 
The legacy web UI would insert users like that and expect those # users to be remote users, usable with the legacy OIDC ID Token login # method through the 'https://dcos.auth0.com/' provider. This behavior is # maintained in Bouncer for backwards compatibility. r = dcos_api_session.put('/acs/api/v1/users/user@domain.foo', json={}) assert r.status_code == 201, r.text # Bouncer annotates the created user (this is new compared to dcos-oauth). r = dcos_api_session.get('/acs/api/v1/users/user@domain.foo') assert r.json()['provider_type'] == 'oidc' assert r.json()['provider_id'] == 'https://dcos.auth0.com/' assert r.json()['is_remote'] is True # When the uid however does not appear to be an email address the more sane # behavior of Bouncer takes effect: an empty (meaningless) JSON body # results in a useful error message. r = dcos_api_session.put('/acs/api/v1/users/user1', json={}) assert r.status_code == 400 assert 'One of `password` or `public_key` must be provided' in r.text @pytest.mark.usefixtures('remove_users_added_by_test') def test_user_put_email_uid_and_description(dcos_api_session): r = dcos_api_session.put( '/acs/api/v1/users/user1@domain.foo', json={'description': 'integration test user'} ) assert r.status_code == 201, r.text users = get_users(dcos_api_session) assert len(users) > 1 assert 'user1@domain.foo' in users @pytest.mark.usefixtures('remove_users_added_by_test') def test_user_put_with_legacy_body(dcos_api_session): # The UI up to DC/OS 1.12 sends the `creator_uid` and the `cluster_url` # properties although they are not used by dcos-oauth. Bouncer supports # these two properties for legacy reasons. Note(JP): As a follow-up task we # should change the UI to not send these properties anymore, and then remove # the properties from Bouncer's UserCreate JSON schema again, ideally within # the 1.13 development cycle. r = dcos_api_session.put( '/acs/api/v1/users/user2@domain.foo', json={'creator_uid': 'any@thing.bla', 'cluster_url': 'foobar'} ) assert r.status_code == 201, r.text @pytest.mark.usefixtures('remove_users_added_by_test') def test_user_conflict(dcos_api_session): # Note: the empty request body is not the decisive criterion here. r = dcos_api_session.put('/acs/api/v1/users/user2@domain.foo', json={}) assert r.status_code == 201, r.text r = dcos_api_session.put('/acs/api/v1/users/user2@domain.foo', json={}) assert r.status_code == 409, r.text @pytest.mark.usefixtures('remove_users_added_by_test') def test_user_delete(dcos_api_session): r = dcos_api_session.put('/acs/api/v1/users/user6@domain.foo', json={}) r.raise_for_status() assert r.status_code == 201 r = dcos_api_session.delete('/acs/api/v1/users/user6@domain.foo') r.raise_for_status() assert r.status_code == 204 users = get_users(dcos_api_session) assert 'user6@domain.foo' not in users def test_user_put_requires_authentication(noauth_api_session): r = noauth_api_session.put('/acs/api/v1/users/user7@domain.foo', json={}) assert r.status_code == 401, r.text def test_dynamic_ui_config(dcos_api_session): r = dcos_api_session.get('/dcos-metadata/ui-config.json') data = r.json() assert not data['clusterConfiguration']['firstUser'] assert 'id' in data['clusterConfiguration'] assert 'uiConfiguration' in data def test_dcos_add_user(dcos_api_session): """ dcos_add_user.py script adds a user to IAM using the script dcos_add_user.py. 
""" email_address = uuid.uuid4().hex + '@example.com' cli = dcos_cli.DcosCli('') command = ['python', '/opt/mesosphere/bin/dcos_add_user.py', email_address] cli.exec_command(command) try: r = dcos_api_session.get('/acs/api/v1/users') r.raise_for_status() expected_user_data = { "uid": email_address, "description": "", "url": "/acs/api/v1/users/" + email_address, "is_remote": True, "is_service": False, "provider_type": "oidc", "provider_id": "https://dcos.auth0.com/" } assert expected_user_data in r.json()['array'] finally: delete_user(dcos_api_session, email_address) def test_check_message_on_adding_user_twice(dcos_api_session): """ Check that the correct message is emitted on adding the same user for the second time. """ email_address = uuid.uuid4().hex + '@example.com' cli = dcos_cli.DcosCli('') command = ['python', '/opt/mesosphere/bin/dcos_add_user.py', email_address] stdout, stderr = cli.exec_command(command) try: expected_output = '[INFO] Created IAM user `' + email_address + '`\n' assert '' == stdout assert expected_output == stderr stdout, stderr = cli.exec_command(command) expected_error = '[INFO] User `' + email_address + '` already exists\n' assert expected_error == stderr assert '' == stdout finally: delete_user(dcos_api_session, email_address)
36.607438
80
0.703804
import logging import uuid import pytest from dcos_test_utils import dcos_cli from test_helpers import get_expanded_config __maintainer__ = 'jgehrcke' __contact__ = 'security-team@mesosphere.io' log = logging.getLogger(__name__) @pytest.fixture(autouse=True) def skip_in_downstream(): expanded_config = get_expanded_config() if 'security' in expanded_config: pytest.skip( 'Skip upstream-specific user management tests', allow_module_level=True ) def get_users(apisession): r = apisession.get('/acs/api/v1/users') r.raise_for_status() users = {u['uid']: u for u in r.json()['array']} return users def delete_user(apisession, uid): r = apisession.delete('/acs/api/v1/users/%s' % (uid, )) r.raise_for_status() assert r.status_code == 204 @pytest.fixture() def remove_users_added_by_test(dcos_api_session): users_before = set(get_users(dcos_api_session)) log.info('remove_users_added_by_test pre test: users are %s', users_before) try: yield finally: users_after = set(get_users(dcos_api_session)) new_uids = users_after - users_before for uid in new_uids: log.info('remove_users_added_by_test post test: remove `%s`', uid) delete_user(dcos_api_session, uid) def test_users_get(dcos_api_session): users = get_users(dcos_api_session) assert users required_keys = ('uid', 'description') for userdict in users.values(): for k in required_keys: assert k in userdict def test_user_put_no_email_uid_empty_body(dcos_api_session): r = dcos_api_session.put('/acs/api/v1/users/user1') assert r.status_code == 400 assert 'Request has bad Content-Type or lacks JSON data' in r.text @pytest.mark.usefixtures('remove_users_added_by_test') def test_legacy_user_creation_with_empty_json_doc(dcos_api_session): r = dcos_api_session.put('/acs/api/v1/users/user@domain.foo', json={}) assert r.status_code == 201, r.text r = dcos_api_session.get('/acs/api/v1/users/user@domain.foo') assert r.json()['provider_type'] == 'oidc' assert r.json()['provider_id'] == 'https://dcos.auth0.com/' assert r.json()['is_remote'] is True r = dcos_api_session.put('/acs/api/v1/users/user1', json={}) assert r.status_code == 400 assert 'One of `password` or `public_key` must be provided' in r.text @pytest.mark.usefixtures('remove_users_added_by_test') def test_user_put_email_uid_and_description(dcos_api_session): r = dcos_api_session.put( '/acs/api/v1/users/user1@domain.foo', json={'description': 'integration test user'} ) assert r.status_code == 201, r.text users = get_users(dcos_api_session) assert len(users) > 1 assert 'user1@domain.foo' in users @pytest.mark.usefixtures('remove_users_added_by_test') def test_user_put_with_legacy_body(dcos_api_session): # the 1.13 development cycle. r = dcos_api_session.put( '/acs/api/v1/users/user2@domain.foo', json={'creator_uid': 'any@thing.bla', 'cluster_url': 'foobar'} ) assert r.status_code == 201, r.text @pytest.mark.usefixtures('remove_users_added_by_test') def test_user_conflict(dcos_api_session): # Note: the empty request body is not the decisive criterion here. 
r = dcos_api_session.put('/acs/api/v1/users/user2@domain.foo', json={}) assert r.status_code == 201, r.text r = dcos_api_session.put('/acs/api/v1/users/user2@domain.foo', json={}) assert r.status_code == 409, r.text @pytest.mark.usefixtures('remove_users_added_by_test') def test_user_delete(dcos_api_session): r = dcos_api_session.put('/acs/api/v1/users/user6@domain.foo', json={}) r.raise_for_status() assert r.status_code == 201 r = dcos_api_session.delete('/acs/api/v1/users/user6@domain.foo') r.raise_for_status() assert r.status_code == 204 users = get_users(dcos_api_session) assert 'user6@domain.foo' not in users def test_user_put_requires_authentication(noauth_api_session): r = noauth_api_session.put('/acs/api/v1/users/user7@domain.foo', json={}) assert r.status_code == 401, r.text def test_dynamic_ui_config(dcos_api_session): r = dcos_api_session.get('/dcos-metadata/ui-config.json') data = r.json() assert not data['clusterConfiguration']['firstUser'] assert 'id' in data['clusterConfiguration'] assert 'uiConfiguration' in data def test_dcos_add_user(dcos_api_session): email_address = uuid.uuid4().hex + '@example.com' cli = dcos_cli.DcosCli('') command = ['python', '/opt/mesosphere/bin/dcos_add_user.py', email_address] cli.exec_command(command) try: r = dcos_api_session.get('/acs/api/v1/users') r.raise_for_status() expected_user_data = { "uid": email_address, "description": "", "url": "/acs/api/v1/users/" + email_address, "is_remote": True, "is_service": False, "provider_type": "oidc", "provider_id": "https://dcos.auth0.com/" } assert expected_user_data in r.json()['array'] finally: delete_user(dcos_api_session, email_address) def test_check_message_on_adding_user_twice(dcos_api_session): email_address = uuid.uuid4().hex + '@example.com' cli = dcos_cli.DcosCli('') command = ['python', '/opt/mesosphere/bin/dcos_add_user.py', email_address] stdout, stderr = cli.exec_command(command) try: expected_output = '[INFO] Created IAM user `' + email_address + '`\n' assert '' == stdout assert expected_output == stderr stdout, stderr = cli.exec_command(command) expected_error = '[INFO] User `' + email_address + '` already exists\n' assert expected_error == stderr assert '' == stdout finally: delete_user(dcos_api_session, email_address)
true
true
f72495af7deabdbbf171c378cd19e6201f5d9763
3,359
py
Python
exercise/substitution_matrix.py
naiaralandeta/programming_naiara_landeta
f90f2d7d00bb84e4bc2b34f02c183fa0373fb97e
[ "MIT" ]
null
null
null
exercise/substitution_matrix.py
naiaralandeta/programming_naiara_landeta
f90f2d7d00bb84e4bc2b34f02c183fa0373fb97e
[ "MIT" ]
null
null
null
exercise/substitution_matrix.py
naiaralandeta/programming_naiara_landeta
f90f2d7d00bb84e4bc2b34f02c183fa0373fb97e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 10:16:35 2019

@author: naiara
"""

# SCORE MATRIX OR SUBSTITUTION MATRIX

"""
1 = "ACAGGTGGACCT"
2 = "ACTGGTCGACTT"

P(A) = 5/24    P(A, A) = 2/12    P(C, C) = 2/12    P(G, T) = 1
P(C) = 6/24    P(A, C) = 1       P(C, G) = 1/12    P(T, T) = 2/12
P(G) = 7/24    P(A, G) = 1       P(C, T) = 1/12
P(T) = 6/24    P(A, T) = 1/12    P(G, G) = 3/12

We assume that when a pair does not occur, its probability is taken to be 1.

s(a, b) = k * log10(P(a, b) / (Qa * Qb)), with k = 1 (no rounding to integers
in this script).

Expected output, rows and columns in the order A, C, G, T (matching
print_matrix below):

    A                   C                   G                   T
A   1.4294292643817876  1.2833012287035497  1.2163544390729364  1.3180633349627615
C   1.2833012287035497  1.271066772286538   1.1719352992845236  1.2388820889151366
G   1.2163544390729364  1.1719352992845236  1.1671364164027547  1.1371731930253115
T   1.3180633349627615  1.2388820889151366  1.1371731930253115  1.271066772286538
"""

import math

seq1 = "ACAGGTGGACCT"
seq2 = "ACTGGTCGACTT"

# Leftover test sequences; they are not used anywhere below.
seq = "CTATATGG"
seq = "CCGGATCG"


def print_matrix(row):
    # Print the score matrix with one row label per line, tab-separated.
    var = ""
    list_bases = ["A", "C", "G", "T"]
    for i in range(len(row)):
        var += list_bases[i] + "\t"
        for j in row[i]:
            var += str(j) + "\t"
        var += "\n"
    print(var)


def sustitution_matrix(seq1, seq2):
    # Returned empty when the sequences cannot be compared.
    result_list = []
    if len(seq1) == len(seq2):
        prob_res = {}
        list_pairs = []
        dic_pairs = {}
        list_bases = ["A", "C", "G", "T"]
        k = 1
        bases = ""
        total_pairs = 0
        total_residuos = len(seq1) + len(seq2)

        # Background probability of each residue over both sequences.
        prob_res["A"] = (seq1.count("A") + seq2.count("A")) / total_residuos
        prob_res["C"] = (seq1.count("C") + seq2.count("C")) / total_residuos
        prob_res["G"] = (seq1.count("G") + seq2.count("G")) / total_residuos
        prob_res["T"] = (seq1.count("T") + seq2.count("T")) / total_residuos
        print(prob_res, "\n")

        # Aligned pairs, one per column of the (ungapped) alignment.
        for i in range(len(seq1)):
            list_pairs.append(seq1[i] + seq2[i])

        # Pair probabilities, symmetrised: e.g. "CG" and "GC" share one count.
        for j in list_pairs:
            if j not in dic_pairs:
                bases = j
                if bases[::-1] not in dic_pairs:
                    if j == bases[::-1]:
                        total_pairs = list_pairs.count(j)
                    else:
                        total_pairs = list_pairs.count(j) + list_pairs.count(bases[::-1])
                    dic_pairs[j] = total_pairs / len(seq1)
                    dic_pairs[bases[::-1]] = dic_pairs[j]
        print(dic_pairs, "\n")

        # Score each pair of bases: observed pairs get their probability plus
        # an offset of 1, unobserved pairs get probability 1 (see docstring).
        for i in range(len(list_bases)):
            list_prob = []
            for j in range(len(list_bases)):
                pro_1 = prob_res[list_bases[i]]
                pro_2 = prob_res[list_bases[j]]
                if list_bases[i] + list_bases[j] in dic_pairs:
                    pro_both = dic_pairs[list_bases[i] + list_bases[j]] + 1
                else:
                    pro_both = 1
                if pro_1 == 0 or pro_2 == 0:
                    list_prob.append(0)
                else:
                    list_prob.append(k * math.log10(pro_both / (pro_1 * pro_2)))
            result_list.append(list_prob)
    else:
        print("The sequences have different lengths")
    return result_list


print_matrix(sustitution_matrix(seq1, seq2))
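# Sanity check of the scoring formula: recompute the A/A entry by hand from
# the counts quoted in the module docstring and compare it with the first
# value of the printed matrix. The "+ 1" offset mirrors what
# sustitution_matrix() applies to pairs that were actually observed; the
# tolerance below is an arbitrary choice for this check.
p_a = 5 / 24                 # P(A): 5 of the 24 residues in seq1 + seq2 are A
p_aa = 2 / 12 + 1            # P(A, A) over the 12 aligned pairs, plus offset
s_aa = math.log10(p_aa / (p_a * p_a))
assert abs(s_aa - 1.4294292643817876) < 1e-9, s_aa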
33.257426
90
0.504019
import math

seq1 = "ACAGGTGGACCT"
seq2 = "ACTGGTCGACTT"

seq = "CTATATGG"
seq = "CCGGATCG"


def print_matrix(row):
    var = ""
    list_bases = ["A", "C", "G", "T"]
    for i in range(len(row)):
        var += list_bases[i] + "\t"
        for j in row[i]:
            var += str(j) + "\t"
        var += "\n"
    print(var)


def sustitution_matrix(seq1, seq2):
    result_list = []
    if len(seq1) == len(seq2):
        prob_res = {}
        list_pairs = []
        dic_pairs = {}
        list_bases = ["A", "C", "G", "T"]
        k = 1
        bases = ""
        total_pairs = 0
        total_residuos = len(seq1) + len(seq2)

        prob_res["A"] = (seq1.count("A") + seq2.count("A")) / total_residuos
        prob_res["C"] = (seq1.count("C") + seq2.count("C")) / total_residuos
        prob_res["G"] = (seq1.count("G") + seq2.count("G")) / total_residuos
        prob_res["T"] = (seq1.count("T") + seq2.count("T")) / total_residuos
        print(prob_res, "\n")

        for i in range(len(seq1)):
            list_pairs.append(seq1[i] + seq2[i])

        for j in list_pairs:
            if j not in dic_pairs:
                bases = j
                if bases[::-1] not in dic_pairs:
                    if j == bases[::-1]:
                        total_pairs = list_pairs.count(j)
                    else:
                        total_pairs = list_pairs.count(j) + list_pairs.count(bases[::-1])
                    dic_pairs[j] = total_pairs / len(seq1)
                    dic_pairs[bases[::-1]] = dic_pairs[j]
        print(dic_pairs, "\n")

        for i in range(len(list_bases)):
            list_prob = []
            for j in range(len(list_bases)):
                pro_1 = prob_res[list_bases[i]]
                pro_2 = prob_res[list_bases[j]]
                if list_bases[i] + list_bases[j] in dic_pairs:
                    pro_both = dic_pairs[list_bases[i] + list_bases[j]] + 1
                else:
                    pro_both = 1
                if pro_1 == 0 or pro_2 == 0:
                    list_prob.append(0)
                else:
                    list_prob.append(k * math.log10(pro_both / (pro_1 * pro_2)))
            result_list.append(list_prob)
    else:
        print("The sequences have different lengths")
    return result_list


print_matrix(sustitution_matrix(seq1, seq2))
true
true